From 132339666a7477a16750e03bf96024d2fa5f3044 Mon Sep 17 00:00:00 2001 From: Pulumi Bot <30351955+pulumi-bot@users.noreply.github.com> Date: Tue, 25 Jun 2024 22:22:27 -0700 Subject: [PATCH] Upgrade terraform-provider-databricks to v1.48.0 (#524) This PR was generated via `$ upgrade-provider pulumi/pulumi-databricks --kind=all --target-bridge-version=latest`. --- - Upgrading terraform-provider-databricks from 1.47.0 to 1.48.0. Fixes #523 --- .../bridge-metadata.json | 39 + .../pulumi-resource-databricks/schema.json | 233 ++- provider/go.mod | 39 +- provider/go.sum | 76 +- sdk/dotnet/Config/Config.cs | 7 + sdk/dotnet/GetAwsAssumeRolePolicy.cs | 4 +- sdk/dotnet/GetAwsBucketPolicy.cs | 4 +- sdk/dotnet/GetAwsCrossAccountPolicy.cs | 4 +- .../GetAwsUnityCatalogAssumeRolePolicy.cs | 12 +- sdk/dotnet/GetAwsUnityCatalogPolicy.cs | 8 +- sdk/dotnet/GetMwsCredentials.cs | 4 +- sdk/dotnet/GetMwsWorkspaces.cs | 4 +- ...GetExternalLocationExternalLocationInfo.cs | 3 + ...xternalLocationExternalLocationInfoArgs.cs | 3 + ...obJobSettingsSettingsEmailNotifications.cs | 8 + ...bSettingsSettingsEmailNotificationsArgs.cs | 8 + ...bSettingsSettingsTaskEmailNotifications.cs | 8 + ...tingsSettingsTaskEmailNotificationsArgs.cs | 8 + ...gsTaskForEachTaskTaskEmailNotifications.cs | 8 + ...skForEachTaskTaskEmailNotificationsArgs.cs | 8 + ...TaskForEachTaskTaskWebhookNotifications.cs | 8 + ...ForEachTaskTaskWebhookNotificationsArgs.cs | 8 + ...NotificationsOnStreamingBacklogExceeded.cs | 26 + ...ficationsOnStreamingBacklogExceededArgs.cs | 26 + ...ettingsSettingsTaskWebhookNotifications.cs | 8 + ...ngsSettingsTaskWebhookNotificationsArgs.cs | 8 + ...NotificationsOnStreamingBacklogExceeded.cs | 26 + ...ficationsOnStreamingBacklogExceededArgs.cs | 26 + ...JobSettingsSettingsWebhookNotifications.cs | 8 + ...ettingsSettingsWebhookNotificationsArgs.cs | 8 + ...NotificationsOnStreamingBacklogExceeded.cs | 26 + ...ficationsOnStreamingBacklogExceededArgs.cs | 26 + 
...tStorageCredentialStorageCredentialInfo.cs | 3 + ...rageCredentialStorageCredentialInfoArgs.cs | 3 + .../Inputs/JobEmailNotificationsArgs.cs | 8 + .../Inputs/JobEmailNotificationsGetArgs.cs | 8 + .../Inputs/JobTaskEmailNotificationsArgs.cs | 8 + .../JobTaskEmailNotificationsGetArgs.cs | 8 + ...skForEachTaskTaskEmailNotificationsArgs.cs | 8 + ...orEachTaskTaskEmailNotificationsGetArgs.cs | 8 + ...ForEachTaskTaskWebhookNotificationsArgs.cs | 8 + ...EachTaskTaskWebhookNotificationsGetArgs.cs | 8 + ...ficationsOnStreamingBacklogExceededArgs.cs | 26 + ...ationsOnStreamingBacklogExceededGetArgs.cs | 26 + .../Inputs/JobTaskWebhookNotificationsArgs.cs | 8 + .../JobTaskWebhookNotificationsGetArgs.cs | 8 + ...ficationsOnStreamingBacklogExceededArgs.cs | 26 + ...ationsOnStreamingBacklogExceededGetArgs.cs | 26 + sdk/dotnet/Inputs/JobTriggerArgs.cs | 3 + sdk/dotnet/Inputs/JobTriggerGetArgs.cs | 3 + sdk/dotnet/Inputs/JobTriggerPeriodicArgs.cs | 26 + .../Inputs/JobTriggerPeriodicGetArgs.cs | 26 + .../Inputs/JobWebhookNotificationsArgs.cs | 8 + .../Inputs/JobWebhookNotificationsGetArgs.cs | 8 + ...ficationsOnStreamingBacklogExceededArgs.cs | 26 + ...ationsOnStreamingBacklogExceededGetArgs.cs | 26 + sdk/dotnet/IpAccessList.cs | 2 +- sdk/dotnet/MwsCredentials.cs | 2 +- sdk/dotnet/MwsCustomerManagedKeys.cs | 2 +- sdk/dotnet/MwsLogDelivery.cs | 8 +- sdk/dotnet/MwsNetworks.cs | 6 +- sdk/dotnet/MwsPrivateAccessSettings.cs | 6 +- sdk/dotnet/MwsStorageConfigurations.cs | 4 +- sdk/dotnet/OnlineTable.cs | 9 + ...ernalLocationExternalLocationInfoResult.cs | 4 + ...ettingsSettingsEmailNotificationsResult.cs | 4 + ...ngsSettingsTaskEmailNotificationsResult.cs | 4 + ...ForEachTaskTaskEmailNotificationsResult.cs | 4 + ...cationsOnStreamingBacklogExceededResult.cs | 27 + ...rEachTaskTaskWebhookNotificationsResult.cs | 4 + ...cationsOnStreamingBacklogExceededResult.cs | 27 + ...sSettingsTaskWebhookNotificationsResult.cs | 4 + ...cationsOnStreamingBacklogExceededResult.cs | 27 + 
...tingsSettingsWebhookNotificationsResult.cs | 4 + ...geCredentialStorageCredentialInfoResult.cs | 4 + sdk/dotnet/Outputs/JobEmailNotifications.cs | 4 + .../Outputs/JobTaskEmailNotifications.cs | 4 + ...obTaskForEachTaskTaskEmailNotifications.cs | 4 + ...TaskForEachTaskTaskWebhookNotifications.cs | 4 + ...NotificationsOnStreamingBacklogExceeded.cs | 27 + .../Outputs/JobTaskWebhookNotifications.cs | 4 + ...NotificationsOnStreamingBacklogExceeded.cs | 27 + sdk/dotnet/Outputs/JobTrigger.cs | 4 + sdk/dotnet/Outputs/JobTriggerPeriodic.cs | 29 + sdk/dotnet/Outputs/JobWebhookNotifications.cs | 4 + ...NotificationsOnStreamingBacklogExceeded.cs | 27 + sdk/dotnet/Provider.cs | 6 + sdk/dotnet/SqlPermissions.cs | 9 + sdk/go/databricks/config/config.go | 3 + sdk/go/databricks/getAwsAssumeRolePolicy.go | 2 +- sdk/go/databricks/getAwsBucketPolicy.go | 4 +- sdk/go/databricks/getAwsCrossAccountPolicy.go | 2 +- .../getAwsUnityCatalogAssumeRolePolicy.go | 8 +- sdk/go/databricks/getAwsUnityCatalogPolicy.go | 4 +- sdk/go/databricks/getMwsCredentials.go | 2 +- sdk/go/databricks/getMwsWorkspaces.go | 2 +- sdk/go/databricks/ipAccessList.go | 2 +- sdk/go/databricks/mwsCredentials.go | 2 +- sdk/go/databricks/mwsCustomerManagedKeys.go | 2 +- sdk/go/databricks/mwsLogDelivery.go | 14 +- sdk/go/databricks/mwsNetworks.go | 6 +- sdk/go/databricks/mwsPrivateAccessSettings.go | 6 +- sdk/go/databricks/mwsStorageConfigurations.go | 4 +- sdk/go/databricks/onlineTable.go | 19 +- sdk/go/databricks/provider.go | 7 + sdk/go/databricks/pulumiTypes.go | 1258 ++++++++--------- sdk/go/databricks/pulumiTypes1.go | 1047 +++++++++++++- sdk/go/databricks/sqlPermissions.go | 18 +- .../java/com/pulumi/databricks/Config.java | 3 + .../databricks/DatabricksFunctions.java | 76 +- .../com/pulumi/databricks/IpAccessList.java | 2 +- .../com/pulumi/databricks/MwsCredentials.java | 2 +- .../databricks/MwsCustomerManagedKeys.java | 2 +- .../com/pulumi/databricks/MwsLogDelivery.java | 6 +- 
.../pulumi/databricks/MwsLogDeliveryArgs.java | 10 +- .../com/pulumi/databricks/MwsNetworks.java | 6 +- .../databricks/MwsPrivateAccessSettings.java | 6 +- .../databricks/MwsStorageConfigurations.java | 4 +- .../com/pulumi/databricks/OnlineTable.java | 6 + .../pulumi/databricks/OnlineTableArgs.java | 17 + .../java/com/pulumi/databricks/Provider.java | 6 + .../com/pulumi/databricks/ProviderArgs.java | 17 + .../com/pulumi/databricks/SqlPermissions.java | 8 + .../pulumi/databricks/SqlPermissionsArgs.java | 20 + .../inputs/GetAwsBucketPolicyArgs.java | 8 +- .../inputs/GetAwsBucketPolicyPlainArgs.java | 6 +- ...etAwsUnityCatalogAssumeRolePolicyArgs.java | 8 +- ...UnityCatalogAssumeRolePolicyPlainArgs.java | 6 +- ...tExternalLocationExternalLocationInfo.java | 13 + ...ernalLocationExternalLocationInfoArgs.java | 17 + ...JobSettingsSettingsEmailNotifications.java | 17 + ...ettingsSettingsEmailNotificationsArgs.java | 21 + ...ettingsSettingsTaskEmailNotifications.java | 17 + ...ngsSettingsTaskEmailNotificationsArgs.java | 21 + ...TaskForEachTaskTaskEmailNotifications.java | 17 + ...ForEachTaskTaskEmailNotificationsArgs.java | 21 + ...skForEachTaskTaskWebhookNotifications.java | 18 + ...rEachTaskTaskWebhookNotificationsArgs.java | 22 + ...tificationsOnStreamingBacklogExceeded.java | 74 + ...cationsOnStreamingBacklogExceededArgs.java | 85 ++ ...tingsSettingsTaskWebhookNotifications.java | 18 + ...sSettingsTaskWebhookNotificationsArgs.java | 22 + ...tificationsOnStreamingBacklogExceeded.java | 74 + ...cationsOnStreamingBacklogExceededArgs.java | 85 ++ ...bSettingsSettingsWebhookNotifications.java | 18 + ...tingsSettingsWebhookNotificationsArgs.java | 22 + ...tificationsOnStreamingBacklogExceeded.java | 74 + ...cationsOnStreamingBacklogExceededArgs.java | 85 ++ ...torageCredentialStorageCredentialInfo.java | 13 + ...geCredentialStorageCredentialInfoArgs.java | 17 + .../inputs/JobEmailNotificationsArgs.java | 21 + .../inputs/JobTaskEmailNotificationsArgs.java | 21 + 
...ForEachTaskTaskEmailNotificationsArgs.java | 21 + ...rEachTaskTaskWebhookNotificationsArgs.java | 22 + ...cationsOnStreamingBacklogExceededArgs.java | 85 ++ .../JobTaskWebhookNotificationsArgs.java | 22 + ...cationsOnStreamingBacklogExceededArgs.java | 85 ++ .../databricks/inputs/JobTriggerArgs.java | 18 + .../inputs/JobTriggerPeriodicArgs.java | 86 ++ .../inputs/JobWebhookNotificationsArgs.java | 22 + ...cationsOnStreamingBacklogExceededArgs.java | 85 ++ .../inputs/MwsLogDeliveryState.java | 10 +- .../databricks/inputs/OnlineTableState.java | 17 + .../inputs/SqlPermissionsState.java | 20 + ...tExternalLocationExternalLocationInfo.java | 13 + ...JobSettingsSettingsEmailNotifications.java | 16 + ...ettingsSettingsTaskEmailNotifications.java | 16 + ...TaskForEachTaskTaskEmailNotifications.java | 16 + ...skForEachTaskTaskWebhookNotifications.java | 17 + ...tificationsOnStreamingBacklogExceeded.java | 58 + ...tingsSettingsTaskWebhookNotifications.java | 17 + ...tificationsOnStreamingBacklogExceeded.java | 58 + ...bSettingsSettingsWebhookNotifications.java | 17 + ...tificationsOnStreamingBacklogExceeded.java | 58 + ...torageCredentialStorageCredentialInfo.java | 13 + .../outputs/JobEmailNotifications.java | 16 + .../outputs/JobTaskEmailNotifications.java | 16 + ...TaskForEachTaskTaskEmailNotifications.java | 16 + ...skForEachTaskTaskWebhookNotifications.java | 17 + ...tificationsOnStreamingBacklogExceeded.java | 58 + .../outputs/JobTaskWebhookNotifications.java | 17 + ...tificationsOnStreamingBacklogExceeded.java | 58 + .../pulumi/databricks/outputs/JobTrigger.java | 14 + .../outputs/JobTriggerPeriodic.java | 66 + .../outputs/JobWebhookNotifications.java | 17 + ...tificationsOnStreamingBacklogExceeded.java | 58 + sdk/nodejs/config/vars.ts | 8 + sdk/nodejs/getAwsAssumeRolePolicy.ts | 4 +- sdk/nodejs/getAwsBucketPolicy.ts | 4 +- sdk/nodejs/getAwsCrossAccountPolicy.ts | 4 +- .../getAwsUnityCatalogAssumeRolePolicy.ts | 12 +- sdk/nodejs/getAwsUnityCatalogPolicy.ts | 8 +- 
sdk/nodejs/getMwsCredentials.ts | 4 +- sdk/nodejs/getMwsWorkspaces.ts | 4 +- sdk/nodejs/ipAccessList.ts | 2 +- sdk/nodejs/mwsCredentials.ts | 2 +- sdk/nodejs/mwsCustomerManagedKeys.ts | 2 +- sdk/nodejs/mwsLogDelivery.ts | 8 +- sdk/nodejs/mwsNetworks.ts | 6 +- sdk/nodejs/mwsPrivateAccessSettings.ts | 6 +- sdk/nodejs/mwsStorageConfigurations.ts | 4 +- sdk/nodejs/onlineTable.ts | 5 + sdk/nodejs/provider.ts | 3 + sdk/nodejs/sqlPermissions.ts | 9 + sdk/nodejs/types/input.ts | 91 ++ sdk/nodejs/types/output.ts | 62 + sdk/python/pulumi_databricks/_inputs.py | 346 +++++ .../pulumi_databricks/config/__init__.pyi | 2 + sdk/python/pulumi_databricks/config/vars.py | 4 + .../get_aws_assume_role_policy.py | 4 +- .../get_aws_bucket_policy.py | 4 +- .../get_aws_cross_account_policy.py | 4 +- ...et_aws_unity_catalog_assume_role_policy.py | 12 +- .../get_aws_unity_catalog_policy.py | 8 +- .../pulumi_databricks/get_mws_credentials.py | 4 +- .../pulumi_databricks/get_mws_workspaces.py | 4 +- .../pulumi_databricks/ip_access_list.py | 4 +- .../pulumi_databricks/mws_credentials.py | 4 +- .../mws_customer_managed_keys.py | 4 +- .../pulumi_databricks/mws_log_delivery.py | 18 +- sdk/python/pulumi_databricks/mws_networks.py | 12 +- .../mws_private_access_settings.py | 12 +- .../mws_storage_configurations.py | 8 +- sdk/python/pulumi_databricks/online_table.py | 40 +- sdk/python/pulumi_databricks/outputs.py | 266 ++++ sdk/python/pulumi_databricks/provider.py | 20 + .../pulumi_databricks/sql_permissions.py | 13 + 227 files changed, 5975 insertions(+), 968 deletions(-) create mode 100644 sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs create mode 100644 sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs create mode 100644 sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.cs create mode 100644 
sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs create mode 100644 sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.cs create mode 100644 sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.cs create mode 100644 sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs create mode 100644 sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs create mode 100644 sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs create mode 100644 sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs create mode 100644 sdk/dotnet/Inputs/JobTriggerPeriodicArgs.cs create mode 100644 sdk/dotnet/Inputs/JobTriggerPeriodicGetArgs.cs create mode 100644 sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.cs create mode 100644 sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs create mode 100644 sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs create mode 100644 sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs create mode 100644 sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult.cs create mode 100644 sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs create mode 100644 sdk/dotnet/Outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.cs create mode 100644 sdk/dotnet/Outputs/JobTriggerPeriodic.cs create mode 100644 sdk/dotnet/Outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.cs create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 
sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerPeriodicArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 
sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTriggerPeriodic.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.java diff --git a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json index fc1278a0..e79d5c65 100644 --- a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json @@ -437,6 +437,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -856,6 +859,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -899,6 +905,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -1178,6 +1187,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -1463,6 +1475,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -1479,6 +1494,9 @@ "file_arrival": { "maxItemsOne": true }, + "periodic": { + "maxItemsOne": true + }, "table": { "maxItemsOne": true, "elem": { @@ -1515,6 +1533,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -3039,6 +3060,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -3395,6 +3419,9 @@ "on_start": { "maxItemsOne": false }, + 
"on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -3438,6 +3465,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -3668,6 +3698,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -3904,6 +3937,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } @@ -3946,6 +3982,9 @@ "on_start": { "maxItemsOne": false }, + "on_streaming_backlog_exceeded": { + "maxItemsOne": false + }, "on_success": { "maxItemsOne": false } diff --git a/provider/cmd/pulumi-resource-databricks/schema.json b/provider/cmd/pulumi-resource-databricks/schema.json index 90edc5e1..864589e6 100644 --- a/provider/cmd/pulumi-resource-databricks/schema.json +++ b/provider/cmd/pulumi-resource-databricks/schema.json @@ -137,6 +137,9 @@ "retryTimeoutSeconds": { "type": "integer" }, + "serverlessComputeId": { + "type": "string" + }, "skipVerify": { "type": "boolean" }, @@ -1154,6 +1157,12 @@ }, "description": "(List) list of emails to notify when the run starts.\n" }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "type": "string" + } + }, "onSuccesses": { "type": "array", "items": { @@ -2955,6 +2964,12 @@ }, "description": "(List) list of emails to notify when the run starts.\n" }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "type": "string" + } + }, "onSuccesses": { "type": "array", "items": { @@ -3211,6 +3226,12 @@ }, "description": "(List) list of emails to notify when the run starts.\n" }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "type": "string" + } + }, "onSuccesses": { "type": "array", "items": { @@ -4265,6 +4286,12 @@ }, "description": "(List) list of notification IDs to call when the run 
starts. A maximum of 3 destinations can be specified.\n" }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded:JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded" + } + }, "onSuccesses": { "type": "array", "items": { @@ -4311,6 +4338,18 @@ "id" ] }, + "databricks:index/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded:JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded": { + "properties": { + "id": { + "type": "string", + "description": "ID of the job\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "databricks:index/JobTaskForEachTaskTaskWebhookNotificationsOnSuccess:JobTaskForEachTaskTaskWebhookNotificationsOnSuccess": { "properties": { "id": { @@ -5367,6 +5406,12 @@ }, "description": "(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.\n" }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/JobTaskWebhookNotificationsOnStreamingBacklogExceeded:JobTaskWebhookNotificationsOnStreamingBacklogExceeded" + } + }, "onSuccesses": { "type": "array", "items": { @@ -5413,6 +5458,18 @@ "id" ] }, + "databricks:index/JobTaskWebhookNotificationsOnStreamingBacklogExceeded:JobTaskWebhookNotificationsOnStreamingBacklogExceeded": { + "properties": { + "id": { + "type": "string", + "description": "ID of the job\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "databricks:index/JobTaskWebhookNotificationsOnSuccess:JobTaskWebhookNotificationsOnSuccess": { "properties": { "id": { @@ -5435,6 +5492,9 @@ "type": "string", "description": "Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. 
When the `pause_status` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pause_status`.\n" }, + "periodic": { + "$ref": "#/types/databricks:index/JobTriggerPeriodic:JobTriggerPeriodic" + }, "table": { "$ref": "#/types/databricks:index/JobTriggerTable:JobTriggerTable" }, @@ -5464,6 +5524,21 @@ "url" ] }, + "databricks:index/JobTriggerPeriodic:JobTriggerPeriodic": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "string" + } + }, + "type": "object", + "required": [ + "interval", + "unit" + ] + }, "databricks:index/JobTriggerTable:JobTriggerTable": { "properties": { "condition": { @@ -5530,6 +5605,12 @@ }, "description": "(List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified.\n" }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/JobWebhookNotificationsOnStreamingBacklogExceeded:JobWebhookNotificationsOnStreamingBacklogExceeded" + } + }, "onSuccesses": { "type": "array", "items": { @@ -5576,6 +5657,18 @@ "id" ] }, + "databricks:index/JobWebhookNotificationsOnStreamingBacklogExceeded:JobWebhookNotificationsOnStreamingBacklogExceeded": { + "properties": { + "id": { + "type": "string", + "description": "ID of the job\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "databricks:index/JobWebhookNotificationsOnSuccess:JobWebhookNotificationsOnSuccess": { "properties": { "id": { @@ -9673,6 +9766,9 @@ "$ref": "#/types/databricks:index/getExternalLocationExternalLocationInfoEncryptionDetails:getExternalLocationExternalLocationInfoEncryptionDetails", "description": "The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS).\n" }, + "isolationMode": { + "type": "string" + }, "metastoreId": { "type": "string", "description": "Unique identifier of the parent Metastore.\n" @@ -10280,6 +10376,12 @@ "type": "string" } }, + 
"onStreamingBacklogExceededs": { + "type": "array", + "items": { + "type": "string" + } + }, "onSuccesses": { "type": "array", "items": { @@ -11798,6 +11900,12 @@ "type": "string" } }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "type": "string" + } + }, "onSuccesses": { "type": "array", "items": { @@ -12016,6 +12124,12 @@ "type": "string" } }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "type": "string" + } + }, "onSuccesses": { "type": "array", "items": { @@ -12874,6 +12988,12 @@ "$ref": "#/types/databricks:index/getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart:getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart" } }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded:getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded" + } + }, "onSuccesses": { "type": "array", "items": { @@ -12919,6 +13039,18 @@ "id" ] }, + "databricks:index/getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded:getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded": { + "properties": { + "id": { + "type": "string", + "description": "the id of databricks.Job if the resource was matched by name.\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "databricks:index/getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess:getJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess": { "properties": { "id": { @@ -13780,6 +13912,12 @@ "$ref": "#/types/databricks:index/getJobJobSettingsSettingsTaskWebhookNotificationsOnStart:getJobJobSettingsSettingsTaskWebhookNotificationsOnStart" } }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "$ref": 
"#/types/databricks:index/getJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded:getJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded" + } + }, "onSuccesses": { "type": "array", "items": { @@ -13825,6 +13963,18 @@ "id" ] }, + "databricks:index/getJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded:getJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded": { + "properties": { + "id": { + "type": "string", + "description": "the id of databricks.Job if the resource was matched by name.\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "databricks:index/getJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess:getJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess": { "properties": { "id": { @@ -13911,6 +14061,12 @@ "$ref": "#/types/databricks:index/getJobJobSettingsSettingsWebhookNotificationsOnStart:getJobJobSettingsSettingsWebhookNotificationsOnStart" } }, + "onStreamingBacklogExceededs": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/getJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded:getJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded" + } + }, "onSuccesses": { "type": "array", "items": { @@ -13956,6 +14112,18 @@ "id" ] }, + "databricks:index/getJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded:getJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded": { + "properties": { + "id": { + "type": "string", + "description": "the id of databricks.Job if the resource was matched by name.\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "databricks:index/getJobJobSettingsSettingsWebhookNotificationsOnSuccess:getJobJobSettingsSettingsWebhookNotificationsOnSuccess": { "properties": { "id": { @@ -14350,6 +14518,9 @@ "type": "string", "description": "Unique ID of storage credential.\n" }, + "isolationMode": { + "type": "string" + }, "metastoreId": { "type": 
"string", "description": "Unique identifier of the parent Metastore.\n" @@ -14919,6 +15090,9 @@ "retryTimeoutSeconds": { "type": "integer" }, + "serverlessComputeId": { + "type": "string" + }, "skipVerify": { "type": "boolean" }, @@ -15014,6 +15188,9 @@ "retryTimeoutSeconds": { "type": "integer" }, + "serverlessComputeId": { + "type": "string" + }, "skipVerify": { "type": "boolean" }, @@ -18002,7 +18179,7 @@ } }, "databricks:index/ipAccessList:IpAccessList": { - "description": "Security-conscious enterprises that use cloud SaaS applications need to restrict access to their own employees. Authentication helps to prove user identity, but that does not enforce network location of the users. Accessing a cloud service from an unsecured network can pose security risks to an enterprise, especially when the user may have authorized access to sensitive or personal data. Enterprise network perimeters apply security policies and limit access to external services (for example, firewalls, proxies, DLP, and logging), so access beyond these controls are assumed to be untrusted. Please see [IP Access List](https://docs.databricks.com/security/network/ip-access-list.html) for full feature documentation.\n\n\u003e **Note** The total number of IP addresses and CIDR scopes provided across all ACL Lists in a workspace can not exceed 1000. 
Refer to the docs above for specifics.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.WorkspaceConf(\"this\", {customConfig: {\n enableIpAccessLists: true,\n}});\nconst allowed_list = new databricks.IpAccessList(\"allowed-list\", {\n label: \"allow_in\",\n listType: \"ALLOW\",\n ipAddresses: [\n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\",\n ],\n}, {\n dependsOn: [_this],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.WorkspaceConf(\"this\", custom_config={\n \"enableIpAccessLists\": True,\n})\nallowed_list = databricks.IpAccessList(\"allowed-list\",\n label=\"allow_in\",\n list_type=\"ALLOW\",\n ip_addresses=[\n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\",\n ],\n opts = pulumi.ResourceOptions(depends_on=[this]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.WorkspaceConf(\"this\", new()\n {\n CustomConfig = \n {\n { \"enableIpAccessLists\", true },\n },\n });\n\n var allowed_list = new Databricks.IpAccessList(\"allowed-list\", new()\n {\n Label = \"allow_in\",\n ListType = \"ALLOW\",\n IpAddresses = new[]\n {\n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n @this,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthis, err := databricks.NewWorkspaceConf(ctx, \"this\", \u0026databricks.WorkspaceConfArgs{\n\t\t\tCustomConfig: pulumi.Map{\n\t\t\t\t\"enableIpAccessLists\": pulumi.Any(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\t_, err = databricks.NewIpAccessList(ctx, \"allowed-list\", \u0026databricks.IpAccessListArgs{\n\t\t\tLabel: pulumi.String(\"allow_in\"),\n\t\t\tListType: pulumi.String(\"ALLOW\"),\n\t\t\tIpAddresses: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"1.1.1.1\"),\n\t\t\t\tpulumi.String(\"1.2.3.0/24\"),\n\t\t\t\tpulumi.String(\"1.2.5.0/24\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tthis,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.WorkspaceConf;\nimport com.pulumi.databricks.WorkspaceConfArgs;\nimport com.pulumi.databricks.IpAccessList;\nimport com.pulumi.databricks.IpAccessListArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new WorkspaceConf(\"this\", WorkspaceConfArgs.builder()\n .customConfig(Map.of(\"enableIpAccessLists\", true))\n .build());\n\n var allowed_list = new IpAccessList(\"allowed-list\", IpAccessListArgs.builder()\n .label(\"allow_in\")\n .listType(\"ALLOW\")\n .ipAddresses( \n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(this_)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:WorkspaceConf\n properties:\n customConfig:\n enableIpAccessLists: true\n allowed-list:\n type: databricks:IpAccessList\n properties:\n label: allow_in\n listType: ALLOW\n ipAddresses:\n - 1.1.1.1\n - 1.2.3.0/24\n - 1.2.5.0/24\n options:\n dependson:\n - ${this}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources 
are often used in the same context:\n\n* End to end workspace management guide.\n* Provisioning AWS Databricks E2 with a Hub \u0026 Spoke firewall for data exfiltration protection guide.\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html).\n* databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace.\n* databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html).\n\n## Import\n\nThe databricks_ip_access_list can be imported using id:\n\nbash\n\n```sh\n$ pulumi import databricks:index/ipAccessList:IpAccessList this \u003clist-id\u003e\n```\n\n", + "description": "Security-conscious enterprises that use cloud SaaS applications need to restrict access to their own employees. Authentication helps to prove user identity, but that does not enforce network location of the users. Accessing a cloud service from an unsecured network can pose security risks to an enterprise, especially when the user may have authorized access to sensitive or personal data. 
Enterprise network perimeters apply security policies and limit access to external services (for example, firewalls, proxies, DLP, and logging), so access beyond these controls are assumed to be untrusted. Please see [IP Access List](https://docs.databricks.com/security/network/ip-access-list.html) for full feature documentation.\n\n\u003e **Note** The total number of IP addresses and CIDR scopes provided across all ACL Lists in a workspace can not exceed 1000. Refer to the docs above for specifics.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.WorkspaceConf(\"this\", {customConfig: {\n enableIpAccessLists: true,\n}});\nconst allowed_list = new databricks.IpAccessList(\"allowed-list\", {\n label: \"allow_in\",\n listType: \"ALLOW\",\n ipAddresses: [\n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\",\n ],\n}, {\n dependsOn: [_this],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.WorkspaceConf(\"this\", custom_config={\n \"enableIpAccessLists\": True,\n})\nallowed_list = databricks.IpAccessList(\"allowed-list\",\n label=\"allow_in\",\n list_type=\"ALLOW\",\n ip_addresses=[\n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\",\n ],\n opts = pulumi.ResourceOptions(depends_on=[this]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.WorkspaceConf(\"this\", new()\n {\n CustomConfig = \n {\n { \"enableIpAccessLists\", true },\n },\n });\n\n var allowed_list = new Databricks.IpAccessList(\"allowed-list\", new()\n {\n Label = \"allow_in\",\n ListType = \"ALLOW\",\n IpAddresses = new[]\n {\n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n @this,\n },\n 
});\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthis, err := databricks.NewWorkspaceConf(ctx, \"this\", \u0026databricks.WorkspaceConfArgs{\n\t\t\tCustomConfig: pulumi.Map{\n\t\t\t\t\"enableIpAccessLists\": pulumi.Any(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewIpAccessList(ctx, \"allowed-list\", \u0026databricks.IpAccessListArgs{\n\t\t\tLabel: pulumi.String(\"allow_in\"),\n\t\t\tListType: pulumi.String(\"ALLOW\"),\n\t\t\tIpAddresses: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"1.1.1.1\"),\n\t\t\t\tpulumi.String(\"1.2.3.0/24\"),\n\t\t\t\tpulumi.String(\"1.2.5.0/24\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tthis,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.WorkspaceConf;\nimport com.pulumi.databricks.WorkspaceConfArgs;\nimport com.pulumi.databricks.IpAccessList;\nimport com.pulumi.databricks.IpAccessListArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new WorkspaceConf(\"this\", WorkspaceConfArgs.builder()\n .customConfig(Map.of(\"enableIpAccessLists\", true))\n .build());\n\n var allowed_list = new IpAccessList(\"allowed-list\", IpAccessListArgs.builder()\n .label(\"allow_in\")\n .listType(\"ALLOW\")\n .ipAddresses( \n \"1.1.1.1\",\n \"1.2.3.0/24\",\n \"1.2.5.0/24\")\n .build(), 
CustomResourceOptions.builder()\n .dependsOn(this_)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:WorkspaceConf\n properties:\n customConfig:\n enableIpAccessLists: true\n allowed-list:\n type: databricks:IpAccessList\n properties:\n label: allow_in\n listType: ALLOW\n ipAddresses:\n - 1.1.1.1\n - 1.2.3.0/24\n - 1.2.5.0/24\n options:\n dependson:\n - ${this}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* End to end workspace management guide.\n* Provisioning AWS Databricks workspaces with a Hub \u0026 Spoke firewall for data exfiltration protection guide.\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html).\n* databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace.\n* databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html).\n\n## Import\n\nThe databricks_ip_access_list can be imported using id:\n\nbash\n\n```sh\n$ pulumi import databricks:index/ipAccessList:IpAccessList this \u003clist-id\u003e\n```\n\n", "properties": { "enabled": { "type": "boolean", @@ -20010,7 +20187,7 @@ } }, 
"databricks:index/mwsCredentials:MwsCredentials": { - "description": "\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nThis resource to configure the cross-account role for creation of new workspaces within AWS.\n\nPlease follow this complete runnable example Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/)\n* `credentials_name` - (Required) name of credentials to register\n* `role_arn` - (Required) ARN of cross-account role\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\nThis resource can be imported by the combination of its identifier and the account id:\n\nbash\n\n```sh\n$ pulumi import databricks:index/mwsCredentials:MwsCredentials this \u003caccount_id\u003e/\u003ccredentials_id\u003e\n```\n\n", + "description": "\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nThis resource to configure the cross-account role for 
creation of new workspaces within AWS.\n\nPlease follow this complete runnable example Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/)\n* `credentials_name` - (Required) name of credentials to register\n* `role_arn` - (Required) ARN of cross-account role\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\nThis resource can be imported by the combination of its identifier and the account id:\n\nbash\n\n```sh\n$ pulumi import databricks:index/mwsCredentials:MwsCredentials this \u003caccount_id\u003e/\u003ccredentials_id\u003e\n```\n\n", "properties": { "accountId": { "type": "string", @@ -20103,7 +20280,7 @@ } }, "databricks:index/mwsCustomerManagedKeys:MwsCustomerManagedKeys": { - "description": "## Example Usage\n\n\u003e **Note** If you've used the resource before, please add `use_cases = [\"MANAGED_SERVICES\"]` to keep the previous behaviour.\n\n### Customer-managed key for managed services\n\nYou must configure this during workspace creation\n\n### For AWS\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi 
from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst current = aws.getCallerIdentity({});\nconst databricksManagedServicesCmk = current.then(current =\u003e aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [current.accountId],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for control plane managed services\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources: [\"*\"],\n },\n ],\n}));\nconst managedServicesCustomerManagedKey = new aws.kms.Key(\"managed_services_customer_managed_key\", {policy: databricksManagedServicesCmk.then(databricksManagedServicesCmk =\u003e databricksManagedServicesCmk.json)});\nconst managedServicesCustomerManagedKeyAlias = new aws.kms.Alias(\"managed_services_customer_managed_key_alias\", {\n name: \"alias/managed-services-customer-managed-key-alias\",\n targetKeyId: managedServicesCustomerManagedKey.keyId,\n});\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managed_services\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: managedServicesCustomerManagedKey.arn,\n keyAlias: managedServicesCustomerManagedKeyAlias.name,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricks_account_id = 
config.require_object(\"databricksAccountId\")\ncurrent = aws.get_caller_identity()\ndatabricks_managed_services_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[current.account_id],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for control plane managed services\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources=[\"*\"],\n ),\n ])\nmanaged_services_customer_managed_key = aws.kms.Key(\"managed_services_customer_managed_key\", policy=databricks_managed_services_cmk.json)\nmanaged_services_customer_managed_key_alias = aws.kms.Alias(\"managed_services_customer_managed_key_alias\",\n name=\"alias/managed-services-customer-managed-key-alias\",\n target_key_id=managed_services_customer_managed_key.key_id)\nmanaged_services = databricks.MwsCustomerManagedKeys(\"managed_services\",\n account_id=databricks_account_id,\n aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=managed_services_customer_managed_key.arn,\n key_alias=managed_services_customer_managed_key_alias.name,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var current = 
Aws.GetCallerIdentity.Invoke();\n\n var databricksManagedServicesCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n current.Apply(getCallerIdentityResult =\u003e getCallerIdentityResult.AccountId),\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for control plane managed services\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n },\n });\n\n var managedServicesCustomerManagedKey = new Aws.Kms.Key(\"managed_services_customer_managed_key\", new()\n {\n Policy = databricksManagedServicesCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var managedServicesCustomerManagedKeyAlias = new Aws.Kms.Alias(\"managed_services_customer_managed_key_alias\", new()\n {\n Name = \"alias/managed-services-customer-managed-key-alias\",\n TargetKeyId = managedServicesCustomerManagedKey.KeyId,\n });\n\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managed_services\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = managedServicesCustomerManagedKey.Arn,\n KeyAlias = managedServicesCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\ncurrent, err := aws.GetCallerIdentity(ctx, nil, nil);\nif err != nil {\nreturn err\n}\ndatabricksManagedServicesCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ncurrent.AccountId,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for control plane managed services\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n},\nResources: []string{\n\"*\",\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKey, err := kms.NewKey(ctx, \"managed_services_customer_managed_key\", \u0026kms.KeyArgs{\nPolicy: pulumi.String(databricksManagedServicesCmk.Json),\n})\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"managed_services_customer_managed_key_alias\", \u0026kms.AliasArgs{\nName: 
pulumi.String(\"alias/managed-services-customer-managed-key-alias\"),\nTargetKeyId: managedServicesCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"managed_services\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: managedServicesCustomerManagedKey.Arn,\nKeyAlias: managedServicesCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"MANAGED_SERVICES\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.AwsFunctions;\nimport com.pulumi.aws.inputs.GetCallerIdentityArgs;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var current = AwsFunctions.getCallerIdentity();\n\n final var databricksManagedServicesCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n 
.principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(current.applyValue(getCallerIdentityResult -\u003e getCallerIdentityResult.accountId()))\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for control plane managed services\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\")\n .resources(\"*\")\n .build())\n .build());\n\n var managedServicesCustomerManagedKey = new Key(\"managedServicesCustomerManagedKey\", KeyArgs.builder()\n .policy(databricksManagedServicesCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var managedServicesCustomerManagedKeyAlias = new Alias(\"managedServicesCustomerManagedKeyAlias\", AliasArgs.builder()\n .name(\"alias/managed-services-customer-managed-key-alias\")\n .targetKeyId(managedServicesCustomerManagedKey.keyId())\n .build());\n\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(managedServicesCustomerManagedKey.arn())\n .keyAlias(managedServicesCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\nresources:\n managedServicesCustomerManagedKey:\n type: aws:kms:Key\n name: managed_services_customer_managed_key\n properties:\n policy: ${databricksManagedServicesCmk.json}\n managedServicesCustomerManagedKeyAlias:\n type: aws:kms:Alias\n name: managed_services_customer_managed_key_alias\n properties:\n name: alias/managed-services-customer-managed-key-alias\n targetKeyId: 
${managedServicesCustomerManagedKey.keyId}\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n name: managed_services\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${managedServicesCustomerManagedKey.arn}\n keyAlias: ${managedServicesCustomerManagedKeyAlias.name}\n useCases:\n - MANAGED_SERVICES\nvariables:\n current:\n fn::invoke:\n Function: aws:getCallerIdentity\n Arguments: {}\n databricksManagedServicesCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${current.accountId}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for control plane managed services\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n resources:\n - '*'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### For GCP\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\n// Id of a google_kms_crypto_key\nconst cmekResourceId = config.requireObject(\"cmekResourceId\");\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managed_services\", {\n accountId: databricksAccountId,\n gcpKeyInfo: {\n kmsKeyId: cmekResourceId,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\n# Id of a 
google_kms_crypto_key\ncmek_resource_id = config.require_object(\"cmekResourceId\")\nmanaged_services = databricks.MwsCustomerManagedKeys(\"managed_services\",\n account_id=databricks_account_id,\n gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs(\n kms_key_id=cmek_resource_id,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n // Id of a google_kms_crypto_key\n var cmekResourceId = config.RequireObject\u003cdynamic\u003e(\"cmekResourceId\");\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managed_services\", new()\n {\n AccountId = databricksAccountId,\n GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs\n {\n KmsKeyId = cmekResourceId,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\t// Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\t// Id of a google_kms_crypto_key\n\t\tcmekResourceId := cfg.RequireObject(\"cmekResourceId\")\n\t\t_, err := databricks.NewMwsCustomerManagedKeys(ctx, \"managed_services\", \u0026databricks.MwsCustomerManagedKeysArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tGcpKeyInfo: 
\u0026databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{\n\t\t\t\tKmsKeyId: pulumi.Any(cmekResourceId),\n\t\t\t},\n\t\t\tUseCases: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"MANAGED_SERVICES\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var cmekResourceId = config.get(\"cmekResourceId\");\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()\n .kmsKeyId(cmekResourceId)\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n cmekResourceId:\n type: dynamic\nresources:\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n name: managed_services\n properties:\n accountId: ${databricksAccountId}\n gcpKeyInfo:\n kmsKeyId: ${cmekResourceId}\n useCases:\n - MANAGED_SERVICES\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Customer-managed key for workspace storage\n\n### For AWS\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from 
\"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\n// AWS ARN for the Databricks cross account role\nconst databricksCrossAccountRole = config.requireObject(\"databricksCrossAccountRole\");\nconst databricksStorageCmk = aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [current.accountId],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"Bool\",\n variable: \"kms:GrantIsForAWSResource\",\n values: [\"true\"],\n }],\n },\n {\n sid: \"Allow Databricks to use KMS key for EBS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [databricksCrossAccountRole],\n }],\n actions: [\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"ForAnyValue:StringLike\",\n variable: \"kms:ViaService\",\n values: [\"ec2.*.amazonaws.com\"],\n }],\n },\n ],\n});\nconst storageCustomerManagedKey = new aws.kms.Key(\"storage_customer_managed_key\", {policy: databricksStorageCmk.then(databricksStorageCmk =\u003e 
databricksStorageCmk.json)});\nconst storageCustomerManagedKeyAlias = new aws.kms.Alias(\"storage_customer_managed_key_alias\", {\n name: \"alias/storage-customer-managed-key-alias\",\n targetKeyId: storageCustomerManagedKey.keyId,\n});\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: storageCustomerManagedKey.arn,\n keyAlias: storageCustomerManagedKeyAlias.name,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\n# AWS ARN for the Databricks cross account role\ndatabricks_cross_account_role = config.require_object(\"databricksCrossAccountRole\")\ndatabricks_storage_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[current[\"accountId\"]],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:CreateGrant\",\n 
\"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"Bool\",\n variable=\"kms:GrantIsForAWSResource\",\n values=[\"true\"],\n )],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for EBS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[databricks_cross_account_role],\n )],\n actions=[\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"ForAnyValue:StringLike\",\n variable=\"kms:ViaService\",\n values=[\"ec2.*.amazonaws.com\"],\n )],\n ),\n ])\nstorage_customer_managed_key = aws.kms.Key(\"storage_customer_managed_key\", policy=databricks_storage_cmk.json)\nstorage_customer_managed_key_alias = aws.kms.Alias(\"storage_customer_managed_key_alias\",\n name=\"alias/storage-customer-managed-key-alias\",\n target_key_id=storage_customer_managed_key.key_id)\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=storage_customer_managed_key.arn,\n key_alias=storage_customer_managed_key_alias.name,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n // AWS ARN for the Databricks cross account role\n var databricksCrossAccountRole = config.RequireObject\u003cdynamic\u003e(\"databricksCrossAccountRole\");\n var databricksStorageCmk = 
Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n current.AccountId,\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS (Grants)\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"Bool\",\n Variable = \"kms:GrantIsForAWSResource\",\n Values = new[]\n {\n \"true\",\n },\n },\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for EBS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n databricksCrossAccountRole,\n },\n },\n },\n Actions 
= new[]\n {\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"ForAnyValue:StringLike\",\n Variable = \"kms:ViaService\",\n Values = new[]\n {\n \"ec2.*.amazonaws.com\",\n },\n },\n },\n },\n },\n });\n\n var storageCustomerManagedKey = new Aws.Kms.Key(\"storage_customer_managed_key\", new()\n {\n Policy = databricksStorageCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var storageCustomerManagedKeyAlias = new Aws.Kms.Alias(\"storage_customer_managed_key_alias\", new()\n {\n Name = \"alias/storage-customer-managed-key-alias\",\n TargetKeyId = storageCustomerManagedKey.KeyId,\n });\n\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = storageCustomerManagedKey.Arn,\n KeyAlias = storageCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n// AWS ARN for the Databricks cross account role\ndatabricksCrossAccountRole := cfg.RequireObject(\"databricksCrossAccountRole\")\ndatabricksStorageCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: 
pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ncurrent.AccountId,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n\"kms:ReEncrypt*\",\n\"kms:GenerateDataKey*\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS (Grants)\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:CreateGrant\",\n\"kms:ListGrants\",\n\"kms:RevokeGrant\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"Bool\",\nVariable: \"kms:GrantIsForAWSResource\",\nValues: []string{\n\"true\",\n},\n},\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for EBS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ndatabricksCrossAccountRole,\n},\n},\n},\nActions: []string{\n\"kms:Decrypt\",\n\"kms:GenerateDataKey*\",\n\"kms:CreateGrant\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"ForAnyValue:StringLike\",\nVariable: \"kms:ViaService\",\nValues: []string{\n\"ec2.*.amazonaws.com\",\n},\n},\n},\n},\n},\n}, nil);\nif err != nil {\nreturn 
err\n}\nstorageCustomerManagedKey, err := kms.NewKey(ctx, \"storage_customer_managed_key\", \u0026kms.KeyArgs{\nPolicy: pulumi.String(databricksStorageCmk.Json),\n})\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"storage_customer_managed_key_alias\", \u0026kms.AliasArgs{\nName: pulumi.String(\"alias/storage-customer-managed-key-alias\"),\nTargetKeyId: storageCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: storageCustomerManagedKey.Arn,\nKeyAlias: storageCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"STORAGE\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var databricksCrossAccountRole = config.get(\"databricksCrossAccountRole\");\n final var databricksStorageCmk = 
IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(current.accountId())\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS (Grants)\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"Bool\")\n .variable(\"kms:GrantIsForAWSResource\")\n .values(\"true\")\n .build())\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for EBS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(databricksCrossAccountRole)\n .build())\n .actions( \n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"ForAnyValue:StringLike\")\n .variable(\"kms:ViaService\")\n .values(\"ec2.*.amazonaws.com\")\n .build())\n .build())\n .build());\n\n var storageCustomerManagedKey = new 
Key(\"storageCustomerManagedKey\", KeyArgs.builder()\n .policy(databricksStorageCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var storageCustomerManagedKeyAlias = new Alias(\"storageCustomerManagedKeyAlias\", AliasArgs.builder()\n .name(\"alias/storage-customer-managed-key-alias\")\n .targetKeyId(storageCustomerManagedKey.keyId())\n .build());\n\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(storageCustomerManagedKey.arn())\n .keyAlias(storageCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n databricksCrossAccountRole:\n type: dynamic\nresources:\n storageCustomerManagedKey:\n type: aws:kms:Key\n name: storage_customer_managed_key\n properties:\n policy: ${databricksStorageCmk.json}\n storageCustomerManagedKeyAlias:\n type: aws:kms:Alias\n name: storage_customer_managed_key_alias\n properties:\n name: alias/storage-customer-managed-key-alias\n targetKeyId: ${storageCustomerManagedKey.keyId}\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${storageCustomerManagedKey.arn}\n keyAlias: ${storageCustomerManagedKeyAlias.name}\n useCases:\n - STORAGE\nvariables:\n databricksStorageCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${current.accountId}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n - kms:ReEncrypt*\n - kms:GenerateDataKey*\n - 
kms:DescribeKey\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS (Grants)\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:CreateGrant\n - kms:ListGrants\n - kms:RevokeGrant\n resources:\n - '*'\n conditions:\n - test: Bool\n variable: kms:GrantIsForAWSResource\n values:\n - 'true'\n - sid: Allow Databricks to use KMS key for EBS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${databricksCrossAccountRole}\n actions:\n - kms:Decrypt\n - kms:GenerateDataKey*\n - kms:CreateGrant\n - kms:DescribeKey\n resources:\n - '*'\n conditions:\n - test: ForAnyValue:StringLike\n variable: kms:ViaService\n values:\n - ec2.*.amazonaws.com\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### For GCP\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\n// Id of a google_kms_crypto_key\nconst cmekResourceId = config.requireObject(\"cmekResourceId\");\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n gcpKeyInfo: {\n kmsKeyId: cmekResourceId,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\n# Id of a google_kms_crypto_key\ncmek_resource_id = config.require_object(\"cmekResourceId\")\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs(\n 
kms_key_id=cmek_resource_id,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n // Id of a google_kms_crypto_key\n var cmekResourceId = config.RequireObject\u003cdynamic\u003e(\"cmekResourceId\");\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs\n {\n KmsKeyId = cmekResourceId,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\t// Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\t// Id of a google_kms_crypto_key\n\t\tcmekResourceId := cfg.RequireObject(\"cmekResourceId\")\n\t\t_, err := databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tGcpKeyInfo: \u0026databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{\n\t\t\t\tKmsKeyId: pulumi.Any(cmekResourceId),\n\t\t\t},\n\t\t\tUseCases: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"STORAGE\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport 
com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var cmekResourceId = config.get(\"cmekResourceId\");\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()\n .kmsKeyId(cmekResourceId)\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n cmekResourceId:\n type: dynamic\nresources:\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n gcpKeyInfo:\n kmsKeyId: ${cmekResourceId}\n useCases:\n - STORAGE\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within 
AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", + "description": "## Example Usage\n\n\u003e **Note** If you've used the resource before, please add `use_cases = [\"MANAGED_SERVICES\"]` to keep the previous behaviour.\n\n### Customer-managed key for managed services\n\nYou must configure this during workspace creation\n\n### For AWS\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst current = aws.getCallerIdentity({});\nconst databricksManagedServicesCmk = current.then(current =\u003e aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [current.accountId],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for control plane managed services\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources: [\"*\"],\n },\n ],\n}));\nconst managedServicesCustomerManagedKey = new aws.kms.Key(\"managed_services_customer_managed_key\", {policy: databricksManagedServicesCmk.then(databricksManagedServicesCmk =\u003e databricksManagedServicesCmk.json)});\nconst managedServicesCustomerManagedKeyAlias = new 
aws.kms.Alias(\"managed_services_customer_managed_key_alias\", {\n name: \"alias/managed-services-customer-managed-key-alias\",\n targetKeyId: managedServicesCustomerManagedKey.keyId,\n});\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managed_services\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: managedServicesCustomerManagedKey.arn,\n keyAlias: managedServicesCustomerManagedKeyAlias.name,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ncurrent = aws.get_caller_identity()\ndatabricks_managed_services_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[current.account_id],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for control plane managed services\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources=[\"*\"],\n ),\n ])\nmanaged_services_customer_managed_key = aws.kms.Key(\"managed_services_customer_managed_key\", policy=databricks_managed_services_cmk.json)\nmanaged_services_customer_managed_key_alias = aws.kms.Alias(\"managed_services_customer_managed_key_alias\",\n name=\"alias/managed-services-customer-managed-key-alias\",\n target_key_id=managed_services_customer_managed_key.key_id)\nmanaged_services = 
databricks.MwsCustomerManagedKeys(\"managed_services\",\n account_id=databricks_account_id,\n aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=managed_services_customer_managed_key.arn,\n key_alias=managed_services_customer_managed_key_alias.name,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var current = Aws.GetCallerIdentity.Invoke();\n\n var databricksManagedServicesCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n current.Apply(getCallerIdentityResult =\u003e getCallerIdentityResult.AccountId),\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for control plane managed services\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n },\n });\n\n var managedServicesCustomerManagedKey = new Aws.Kms.Key(\"managed_services_customer_managed_key\", new()\n {\n Policy = 
databricksManagedServicesCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var managedServicesCustomerManagedKeyAlias = new Aws.Kms.Alias(\"managed_services_customer_managed_key_alias\", new()\n {\n Name = \"alias/managed-services-customer-managed-key-alias\",\n TargetKeyId = managedServicesCustomerManagedKey.KeyId,\n });\n\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managed_services\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = managedServicesCustomerManagedKey.Arn,\n KeyAlias = managedServicesCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\ncurrent, err := aws.GetCallerIdentity(ctx, nil, nil);\nif err != nil {\nreturn err\n}\ndatabricksManagedServicesCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ncurrent.AccountId,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to 
use KMS key for control plane managed services\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n},\nResources: []string{\n\"*\",\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKey, err := kms.NewKey(ctx, \"managed_services_customer_managed_key\", \u0026kms.KeyArgs{\nPolicy: pulumi.String(databricksManagedServicesCmk.Json),\n})\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"managed_services_customer_managed_key_alias\", \u0026kms.AliasArgs{\nName: pulumi.String(\"alias/managed-services-customer-managed-key-alias\"),\nTargetKeyId: managedServicesCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"managed_services\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: managedServicesCustomerManagedKey.Arn,\nKeyAlias: managedServicesCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"MANAGED_SERVICES\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.AwsFunctions;\nimport com.pulumi.aws.inputs.GetCallerIdentityArgs;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport 
com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var current = AwsFunctions.getCallerIdentity();\n\n final var databricksManagedServicesCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(current.applyValue(getCallerIdentityResult -\u003e getCallerIdentityResult.accountId()))\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for control plane managed services\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\")\n .resources(\"*\")\n .build())\n .build());\n\n var managedServicesCustomerManagedKey = new Key(\"managedServicesCustomerManagedKey\", KeyArgs.builder()\n .policy(databricksManagedServicesCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var managedServicesCustomerManagedKeyAlias = new Alias(\"managedServicesCustomerManagedKeyAlias\", AliasArgs.builder()\n .name(\"alias/managed-services-customer-managed-key-alias\")\n .targetKeyId(managedServicesCustomerManagedKey.keyId())\n .build());\n\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", 
MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(managedServicesCustomerManagedKey.arn())\n .keyAlias(managedServicesCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\nresources:\n managedServicesCustomerManagedKey:\n type: aws:kms:Key\n name: managed_services_customer_managed_key\n properties:\n policy: ${databricksManagedServicesCmk.json}\n managedServicesCustomerManagedKeyAlias:\n type: aws:kms:Alias\n name: managed_services_customer_managed_key_alias\n properties:\n name: alias/managed-services-customer-managed-key-alias\n targetKeyId: ${managedServicesCustomerManagedKey.keyId}\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n name: managed_services\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${managedServicesCustomerManagedKey.arn}\n keyAlias: ${managedServicesCustomerManagedKeyAlias.name}\n useCases:\n - MANAGED_SERVICES\nvariables:\n current:\n fn::invoke:\n Function: aws:getCallerIdentity\n Arguments: {}\n databricksManagedServicesCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${current.accountId}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for control plane managed services\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n resources:\n - '*'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### For GCP\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be 
found in the top right corner of https://accounts.gcp.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\n// Id of a google_kms_crypto_key\nconst cmekResourceId = config.requireObject(\"cmekResourceId\");\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managed_services\", {\n accountId: databricksAccountId,\n gcpKeyInfo: {\n kmsKeyId: cmekResourceId,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\n# Id of a google_kms_crypto_key\ncmek_resource_id = config.require_object(\"cmekResourceId\")\nmanaged_services = databricks.MwsCustomerManagedKeys(\"managed_services\",\n account_id=databricks_account_id,\n gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs(\n kms_key_id=cmek_resource_id,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n // Id of a google_kms_crypto_key\n var cmekResourceId = config.RequireObject\u003cdynamic\u003e(\"cmekResourceId\");\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managed_services\", new()\n {\n AccountId = databricksAccountId,\n GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs\n {\n KmsKeyId = cmekResourceId,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\t// Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\t// Id of a google_kms_crypto_key\n\t\tcmekResourceId := cfg.RequireObject(\"cmekResourceId\")\n\t\t_, err := databricks.NewMwsCustomerManagedKeys(ctx, \"managed_services\", \u0026databricks.MwsCustomerManagedKeysArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tGcpKeyInfo: \u0026databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{\n\t\t\t\tKmsKeyId: pulumi.Any(cmekResourceId),\n\t\t\t},\n\t\t\tUseCases: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"MANAGED_SERVICES\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var cmekResourceId = config.get(\"cmekResourceId\");\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n 
.gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()\n .kmsKeyId(cmekResourceId)\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n cmekResourceId:\n type: dynamic\nresources:\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n name: managed_services\n properties:\n accountId: ${databricksAccountId}\n gcpKeyInfo:\n kmsKeyId: ${cmekResourceId}\n useCases:\n - MANAGED_SERVICES\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Customer-managed key for workspace storage\n\n### For AWS\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\n// AWS ARN for the Databricks cross account role\nconst databricksCrossAccountRole = config.requireObject(\"databricksCrossAccountRole\");\nconst databricksStorageCmk = aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [current.accountId],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:CreateGrant\",\n 
\"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"Bool\",\n variable: \"kms:GrantIsForAWSResource\",\n values: [\"true\"],\n }],\n },\n {\n sid: \"Allow Databricks to use KMS key for EBS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [databricksCrossAccountRole],\n }],\n actions: [\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"ForAnyValue:StringLike\",\n variable: \"kms:ViaService\",\n values: [\"ec2.*.amazonaws.com\"],\n }],\n },\n ],\n});\nconst storageCustomerManagedKey = new aws.kms.Key(\"storage_customer_managed_key\", {policy: databricksStorageCmk.then(databricksStorageCmk =\u003e databricksStorageCmk.json)});\nconst storageCustomerManagedKeyAlias = new aws.kms.Alias(\"storage_customer_managed_key_alias\", {\n name: \"alias/storage-customer-managed-key-alias\",\n targetKeyId: storageCustomerManagedKey.keyId,\n});\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: storageCustomerManagedKey.arn,\n keyAlias: storageCustomerManagedKeyAlias.name,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\n# AWS ARN for the Databricks cross account role\ndatabricks_cross_account_role = config.require_object(\"databricksCrossAccountRole\")\ndatabricks_storage_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[current[\"accountId\"]],\n 
)],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"Bool\",\n variable=\"kms:GrantIsForAWSResource\",\n values=[\"true\"],\n )],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for EBS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[databricks_cross_account_role],\n )],\n actions=[\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"ForAnyValue:StringLike\",\n variable=\"kms:ViaService\",\n values=[\"ec2.*.amazonaws.com\"],\n )],\n ),\n ])\nstorage_customer_managed_key = aws.kms.Key(\"storage_customer_managed_key\", policy=databricks_storage_cmk.json)\nstorage_customer_managed_key_alias = aws.kms.Alias(\"storage_customer_managed_key_alias\",\n name=\"alias/storage-customer-managed-key-alias\",\n target_key_id=storage_customer_managed_key.key_id)\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n 
aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=storage_customer_managed_key.arn,\n key_alias=storage_customer_managed_key_alias.name,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n // AWS ARN for the Databricks cross account role\n var databricksCrossAccountRole = config.RequireObject\u003cdynamic\u003e(\"databricksCrossAccountRole\");\n var databricksStorageCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n current.AccountId,\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS (Grants)\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new 
Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"Bool\",\n Variable = \"kms:GrantIsForAWSResource\",\n Values = new[]\n {\n \"true\",\n },\n },\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for EBS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n databricksCrossAccountRole,\n },\n },\n },\n Actions = new[]\n {\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"ForAnyValue:StringLike\",\n Variable = \"kms:ViaService\",\n Values = new[]\n {\n \"ec2.*.amazonaws.com\",\n },\n },\n },\n },\n },\n });\n\n var storageCustomerManagedKey = new Aws.Kms.Key(\"storage_customer_managed_key\", new()\n {\n Policy = databricksStorageCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var storageCustomerManagedKeyAlias = new Aws.Kms.Alias(\"storage_customer_managed_key_alias\", new()\n {\n Name = \"alias/storage-customer-managed-key-alias\",\n TargetKeyId = storageCustomerManagedKey.KeyId,\n });\n\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = storageCustomerManagedKey.Arn,\n KeyAlias = storageCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n 
});\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n// AWS ARN for the Databricks cross account role\ndatabricksCrossAccountRole := cfg.RequireObject(\"databricksCrossAccountRole\")\ndatabricksStorageCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ncurrent.AccountId,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n\"kms:ReEncrypt*\",\n\"kms:GenerateDataKey*\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS (Grants)\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:CreateGrant\",\n\"kms:ListGrants\",\n\"kms:RevokeGrant\",\n},\nResources: 
[]string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"Bool\",\nVariable: \"kms:GrantIsForAWSResource\",\nValues: []string{\n\"true\",\n},\n},\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for EBS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ndatabricksCrossAccountRole,\n},\n},\n},\nActions: []string{\n\"kms:Decrypt\",\n\"kms:GenerateDataKey*\",\n\"kms:CreateGrant\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"ForAnyValue:StringLike\",\nVariable: \"kms:ViaService\",\nValues: []string{\n\"ec2.*.amazonaws.com\",\n},\n},\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKey, err := kms.NewKey(ctx, \"storage_customer_managed_key\", \u0026kms.KeyArgs{\nPolicy: pulumi.String(databricksStorageCmk.Json),\n})\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"storage_customer_managed_key_alias\", \u0026kms.AliasArgs{\nName: pulumi.String(\"alias/storage-customer-managed-key-alias\"),\nTargetKeyId: storageCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: storageCustomerManagedKey.Arn,\nKeyAlias: storageCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"STORAGE\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport 
com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var databricksCrossAccountRole = config.get(\"databricksCrossAccountRole\");\n final var databricksStorageCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(current.accountId())\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS (Grants)\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\")\n .resources(\"*\")\n 
.conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"Bool\")\n .variable(\"kms:GrantIsForAWSResource\")\n .values(\"true\")\n .build())\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for EBS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(databricksCrossAccountRole)\n .build())\n .actions( \n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"ForAnyValue:StringLike\")\n .variable(\"kms:ViaService\")\n .values(\"ec2.*.amazonaws.com\")\n .build())\n .build())\n .build());\n\n var storageCustomerManagedKey = new Key(\"storageCustomerManagedKey\", KeyArgs.builder()\n .policy(databricksStorageCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var storageCustomerManagedKeyAlias = new Alias(\"storageCustomerManagedKeyAlias\", AliasArgs.builder()\n .name(\"alias/storage-customer-managed-key-alias\")\n .targetKeyId(storageCustomerManagedKey.keyId())\n .build());\n\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(storageCustomerManagedKey.arn())\n .keyAlias(storageCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n databricksCrossAccountRole:\n type: dynamic\nresources:\n storageCustomerManagedKey:\n type: aws:kms:Key\n name: storage_customer_managed_key\n properties:\n policy: ${databricksStorageCmk.json}\n storageCustomerManagedKeyAlias:\n type: aws:kms:Alias\n name: storage_customer_managed_key_alias\n properties:\n name: alias/storage-customer-managed-key-alias\n targetKeyId: 
${storageCustomerManagedKey.keyId}\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${storageCustomerManagedKey.arn}\n keyAlias: ${storageCustomerManagedKeyAlias.name}\n useCases:\n - STORAGE\nvariables:\n databricksStorageCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${current.accountId}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n - kms:ReEncrypt*\n - kms:GenerateDataKey*\n - kms:DescribeKey\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS (Grants)\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:CreateGrant\n - kms:ListGrants\n - kms:RevokeGrant\n resources:\n - '*'\n conditions:\n - test: Bool\n variable: kms:GrantIsForAWSResource\n values:\n - 'true'\n - sid: Allow Databricks to use KMS key for EBS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${databricksCrossAccountRole}\n actions:\n - kms:Decrypt\n - kms:GenerateDataKey*\n - kms:CreateGrant\n - kms:DescribeKey\n resources:\n - '*'\n conditions:\n - test: ForAnyValue:StringLike\n variable: kms:ViaService\n values:\n - ec2.*.amazonaws.com\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### For GCP\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\n// Id of a 
google_kms_crypto_key\nconst cmekResourceId = config.requireObject(\"cmekResourceId\");\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n gcpKeyInfo: {\n kmsKeyId: cmekResourceId,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\n# Id of a google_kms_crypto_key\ncmek_resource_id = config.require_object(\"cmekResourceId\")\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs(\n kms_key_id=cmek_resource_id,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.gcp.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n // Id of a google_kms_crypto_key\n var cmekResourceId = config.RequireObject\u003cdynamic\u003e(\"cmekResourceId\");\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs\n {\n KmsKeyId = cmekResourceId,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\t// Account Id that could be 
found in the top right corner of https://accounts.gcp.databricks.com/\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\t// Id of a google_kms_crypto_key\n\t\tcmekResourceId := cfg.RequireObject(\"cmekResourceId\")\n\t\t_, err := databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tGcpKeyInfo: \u0026databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{\n\t\t\t\tKmsKeyId: pulumi.Any(cmekResourceId),\n\t\t\t},\n\t\t\tUseCases: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"STORAGE\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var cmekResourceId = config.get(\"cmekResourceId\");\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder()\n .accountId(databricksAccountId)\n .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()\n .kmsKeyId(cmekResourceId)\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n cmekResourceId:\n type: dynamic\nresources:\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n gcpKeyInfo:\n kmsKeyId: 
${cmekResourceId}\n useCases:\n - STORAGE\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", "properties": { "accountId": { "type": "string", @@ -20215,7 +20392,7 @@ } }, "databricks:index/mwsLogDelivery:MwsLogDelivery": { - "description": "\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nThis resource configures the delivery of the two supported log types from Databricks workspaces: [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n\nYou cannot delete a log delivery configuration, but you can disable it when you no longer need it. This fact is important because there is a limit to the number of enabled log delivery configurations that you can create for an account. 
You can create a maximum of two enabled configurations that use the account level (no workspace filter) and two enabled configurations for every specific workspace (a workspaceId can occur in the workspace filter for two configurations). You can re-enable a disabled configuration, but the request fails if it violates the limits previously described.\n\n## Billable Usage\n\nCSV files are delivered to `\u003cdelivery_path_prefix\u003e/billable-usage/csv/` and are named `workspaceId=\u003cworkspace-id\u003e-usageMonth=\u003cmonth\u003e.csv`, which are delivered daily by overwriting the month's CSV file for each workspace. Format of CSV file, as well as some usage examples, can be found [here](https://docs.databricks.com/administration-guide/account-settings/usage.html#download-usage-as-a-csv-file).\n\nCommon processing scenario is to apply [cost allocation tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html), that could be enforced by setting custom_tags on a cluster or through cluster policy. 
Report contains `clusterId` field, that could be joined with data from AWS [cost and usage reports](https://docs.aws.amazon.com/cur/latest/userguide/cur-create.html), that can be joined with `user:ClusterId` tag from AWS usage report.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst usageLogs = new databricks.MwsLogDelivery(\"usage_logs\", {\n accountId: databricksAccountId,\n credentialsId: logWriter.credentialsId,\n storageConfigurationId: logBucket.storageConfigurationId,\n deliveryPathPrefix: \"billable-usage\",\n configName: \"Usage Logs\",\n logType: \"BILLABLE_USAGE\",\n outputFormat: \"CSV\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nusage_logs = databricks.MwsLogDelivery(\"usage_logs\",\n account_id=databricks_account_id,\n credentials_id=log_writer[\"credentialsId\"],\n storage_configuration_id=log_bucket[\"storageConfigurationId\"],\n delivery_path_prefix=\"billable-usage\",\n config_name=\"Usage Logs\",\n log_type=\"BILLABLE_USAGE\",\n output_format=\"CSV\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var usageLogs = new Databricks.MwsLogDelivery(\"usage_logs\", new()\n {\n AccountId = databricksAccountId,\n CredentialsId = logWriter.CredentialsId,\n StorageConfigurationId = logBucket.StorageConfigurationId,\n DeliveryPathPrefix = \"billable-usage\",\n ConfigName = \"Usage Logs\",\n LogType = \"BILLABLE_USAGE\",\n OutputFormat = \"CSV\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsLogDelivery(ctx, \"usage_logs\", 
\u0026databricks.MwsLogDeliveryArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tCredentialsId: pulumi.Any(logWriter.CredentialsId),\n\t\t\tStorageConfigurationId: pulumi.Any(logBucket.StorageConfigurationId),\n\t\t\tDeliveryPathPrefix: pulumi.String(\"billable-usage\"),\n\t\t\tConfigName: pulumi.String(\"Usage Logs\"),\n\t\t\tLogType: pulumi.String(\"BILLABLE_USAGE\"),\n\t\t\tOutputFormat: pulumi.String(\"CSV\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsLogDelivery;\nimport com.pulumi.databricks.MwsLogDeliveryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var usageLogs = new MwsLogDelivery(\"usageLogs\", MwsLogDeliveryArgs.builder()\n .accountId(databricksAccountId)\n .credentialsId(logWriter.credentialsId())\n .storageConfigurationId(logBucket.storageConfigurationId())\n .deliveryPathPrefix(\"billable-usage\")\n .configName(\"Usage Logs\")\n .logType(\"BILLABLE_USAGE\")\n .outputFormat(\"CSV\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n usageLogs:\n type: databricks:MwsLogDelivery\n name: usage_logs\n properties:\n accountId: ${databricksAccountId}\n credentialsId: ${logWriter.credentialsId}\n storageConfigurationId: ${logBucket.storageConfigurationId}\n deliveryPathPrefix: billable-usage\n configName: Usage Logs\n logType: BILLABLE_USAGE\n outputFormat: CSV\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Audit Logs\n\nJSON files with [static schema](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html#audit-log-schema) are delivered to 
`\u003cdelivery_path_prefix\u003e/workspaceId=\u003cworkspaceId\u003e/date=\u003cyyyy-mm-dd\u003e/auditlogs_\u003cinternal-id\u003e.json`. Logs are available within 15 minutes of activation for audit logs. New JSON files are delivered every few minutes, potentially overwriting existing files for each workspace. Sometimes data may arrive later than 15 minutes. Databricks can overwrite the delivered log files in your bucket at any time. If a file is overwritten, the existing content remains, but there may be additional lines for more auditable events. Overwriting ensures exactly-once semantics without requiring read or delete access to your account.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst auditLogs = new databricks.MwsLogDelivery(\"audit_logs\", {\n accountId: databricksAccountId,\n credentialsId: logWriter.credentialsId,\n storageConfigurationId: logBucket.storageConfigurationId,\n deliveryPathPrefix: \"audit-logs\",\n configName: \"Audit Logs\",\n logType: \"AUDIT_LOGS\",\n outputFormat: \"JSON\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naudit_logs = databricks.MwsLogDelivery(\"audit_logs\",\n account_id=databricks_account_id,\n credentials_id=log_writer[\"credentialsId\"],\n storage_configuration_id=log_bucket[\"storageConfigurationId\"],\n delivery_path_prefix=\"audit-logs\",\n config_name=\"Audit Logs\",\n log_type=\"AUDIT_LOGS\",\n output_format=\"JSON\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var auditLogs = new Databricks.MwsLogDelivery(\"audit_logs\", new()\n {\n AccountId = databricksAccountId,\n CredentialsId = logWriter.CredentialsId,\n StorageConfigurationId = logBucket.StorageConfigurationId,\n DeliveryPathPrefix = \"audit-logs\",\n ConfigName = \"Audit 
Logs\",\n LogType = \"AUDIT_LOGS\",\n OutputFormat = \"JSON\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsLogDelivery(ctx, \"audit_logs\", \u0026databricks.MwsLogDeliveryArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tCredentialsId: pulumi.Any(logWriter.CredentialsId),\n\t\t\tStorageConfigurationId: pulumi.Any(logBucket.StorageConfigurationId),\n\t\t\tDeliveryPathPrefix: pulumi.String(\"audit-logs\"),\n\t\t\tConfigName: pulumi.String(\"Audit Logs\"),\n\t\t\tLogType: pulumi.String(\"AUDIT_LOGS\"),\n\t\t\tOutputFormat: pulumi.String(\"JSON\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsLogDelivery;\nimport com.pulumi.databricks.MwsLogDeliveryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var auditLogs = new MwsLogDelivery(\"auditLogs\", MwsLogDeliveryArgs.builder()\n .accountId(databricksAccountId)\n .credentialsId(logWriter.credentialsId())\n .storageConfigurationId(logBucket.storageConfigurationId())\n .deliveryPathPrefix(\"audit-logs\")\n .configName(\"Audit Logs\")\n .logType(\"AUDIT_LOGS\")\n .outputFormat(\"JSON\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n auditLogs:\n type: databricks:MwsLogDelivery\n name: audit_logs\n properties:\n accountId: ${databricksAccountId}\n credentialsId: ${logWriter.credentialsId}\n storageConfigurationId: ${logBucket.storageConfigurationId}\n 
deliveryPathPrefix: audit-logs\n configName: Audit Logs\n logType: AUDIT_LOGS\n outputFormat: JSON\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", + "description": "\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nThis resource configures the delivery of the two supported log types from Databricks workspaces: [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n\nYou cannot delete a log delivery configuration, but you can disable it when you no longer need it. This fact is important because there is a limit to the number of enabled log delivery configurations that you can create for an account. You can create a maximum of two enabled configurations that use the account level (no workspace filter) and two enabled configurations for every specific workspace (a workspaceId can occur in the workspace filter for two configurations). 
You can re-enable a disabled configuration, but the request fails if it violates the limits previously described.\n\n## Billable Usage\n\nCSV files are delivered to `\u003cdelivery_path_prefix\u003e/billable-usage/csv/` and are named `workspaceId=\u003cworkspace-id\u003e-usageMonth=\u003cmonth\u003e.csv`, which are delivered daily by overwriting the month's CSV file for each workspace. Format of CSV file, as well as some usage examples, can be found [here](https://docs.databricks.com/administration-guide/account-settings/usage.html#download-usage-as-a-csv-file).\n\nCommon processing scenario is to apply [cost allocation tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html), that could be enforced by setting custom_tags on a cluster or through cluster policy. Report contains `clusterId` field, that could be joined with data from AWS [cost and usage reports](https://docs.aws.amazon.com/cur/latest/userguide/cur-create.html), that can be joined with `user:ClusterId` tag from AWS usage report.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst usageLogs = new databricks.MwsLogDelivery(\"usage_logs\", {\n accountId: databricksAccountId,\n credentialsId: logWriter.credentialsId,\n storageConfigurationId: logBucket.storageConfigurationId,\n deliveryPathPrefix: \"billable-usage\",\n configName: \"Usage Logs\",\n logType: \"BILLABLE_USAGE\",\n outputFormat: \"CSV\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nusage_logs = databricks.MwsLogDelivery(\"usage_logs\",\n account_id=databricks_account_id,\n credentials_id=log_writer[\"credentialsId\"],\n storage_configuration_id=log_bucket[\"storageConfigurationId\"],\n delivery_path_prefix=\"billable-usage\",\n config_name=\"Usage Logs\",\n log_type=\"BILLABLE_USAGE\",\n output_format=\"CSV\")\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var usageLogs = new Databricks.MwsLogDelivery(\"usage_logs\", new()\n {\n AccountId = databricksAccountId,\n CredentialsId = logWriter.CredentialsId,\n StorageConfigurationId = logBucket.StorageConfigurationId,\n DeliveryPathPrefix = \"billable-usage\",\n ConfigName = \"Usage Logs\",\n LogType = \"BILLABLE_USAGE\",\n OutputFormat = \"CSV\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsLogDelivery(ctx, \"usage_logs\", \u0026databricks.MwsLogDeliveryArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tCredentialsId: pulumi.Any(logWriter.CredentialsId),\n\t\t\tStorageConfigurationId: pulumi.Any(logBucket.StorageConfigurationId),\n\t\t\tDeliveryPathPrefix: pulumi.String(\"billable-usage\"),\n\t\t\tConfigName: pulumi.String(\"Usage Logs\"),\n\t\t\tLogType: pulumi.String(\"BILLABLE_USAGE\"),\n\t\t\tOutputFormat: pulumi.String(\"CSV\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsLogDelivery;\nimport com.pulumi.databricks.MwsLogDeliveryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var usageLogs = new MwsLogDelivery(\"usageLogs\", MwsLogDeliveryArgs.builder()\n .accountId(databricksAccountId)\n .credentialsId(logWriter.credentialsId())\n 
.storageConfigurationId(logBucket.storageConfigurationId())\n .deliveryPathPrefix(\"billable-usage\")\n .configName(\"Usage Logs\")\n .logType(\"BILLABLE_USAGE\")\n .outputFormat(\"CSV\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n usageLogs:\n type: databricks:MwsLogDelivery\n name: usage_logs\n properties:\n accountId: ${databricksAccountId}\n credentialsId: ${logWriter.credentialsId}\n storageConfigurationId: ${logBucket.storageConfigurationId}\n deliveryPathPrefix: billable-usage\n configName: Usage Logs\n logType: BILLABLE_USAGE\n outputFormat: CSV\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Audit Logs\n\nJSON files with [static schema](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html#audit-log-schema) are delivered to `\u003cdelivery_path_prefix\u003e/workspaceId=\u003cworkspaceId\u003e/date=\u003cyyyy-mm-dd\u003e/auditlogs_\u003cinternal-id\u003e.json`. Logs are available within 15 minutes of activation for audit logs. New JSON files are delivered every few minutes, potentially overwriting existing files for each workspace. Sometimes data may arrive later than 15 minutes. Databricks can overwrite the delivered log files in your bucket at any time. If a file is overwritten, the existing content remains, but there may be additional lines for more auditable events. 
Overwriting ensures exactly-once semantics without requiring read or delete access to your account.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst auditLogs = new databricks.MwsLogDelivery(\"audit_logs\", {\n accountId: databricksAccountId,\n credentialsId: logWriter.credentialsId,\n storageConfigurationId: logBucket.storageConfigurationId,\n deliveryPathPrefix: \"audit-logs\",\n configName: \"Audit Logs\",\n logType: \"AUDIT_LOGS\",\n outputFormat: \"JSON\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naudit_logs = databricks.MwsLogDelivery(\"audit_logs\",\n account_id=databricks_account_id,\n credentials_id=log_writer[\"credentialsId\"],\n storage_configuration_id=log_bucket[\"storageConfigurationId\"],\n delivery_path_prefix=\"audit-logs\",\n config_name=\"Audit Logs\",\n log_type=\"AUDIT_LOGS\",\n output_format=\"JSON\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var auditLogs = new Databricks.MwsLogDelivery(\"audit_logs\", new()\n {\n AccountId = databricksAccountId,\n CredentialsId = logWriter.CredentialsId,\n StorageConfigurationId = logBucket.StorageConfigurationId,\n DeliveryPathPrefix = \"audit-logs\",\n ConfigName = \"Audit Logs\",\n LogType = \"AUDIT_LOGS\",\n OutputFormat = \"JSON\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsLogDelivery(ctx, \"audit_logs\", \u0026databricks.MwsLogDeliveryArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tCredentialsId: pulumi.Any(logWriter.CredentialsId),\n\t\t\tStorageConfigurationId: 
pulumi.Any(logBucket.StorageConfigurationId),\n\t\t\tDeliveryPathPrefix: pulumi.String(\"audit-logs\"),\n\t\t\tConfigName: pulumi.String(\"Audit Logs\"),\n\t\t\tLogType: pulumi.String(\"AUDIT_LOGS\"),\n\t\t\tOutputFormat: pulumi.String(\"JSON\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsLogDelivery;\nimport com.pulumi.databricks.MwsLogDeliveryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var auditLogs = new MwsLogDelivery(\"auditLogs\", MwsLogDeliveryArgs.builder()\n .accountId(databricksAccountId)\n .credentialsId(logWriter.credentialsId())\n .storageConfigurationId(logBucket.storageConfigurationId())\n .deliveryPathPrefix(\"audit-logs\")\n .configName(\"Audit Logs\")\n .logType(\"AUDIT_LOGS\")\n .outputFormat(\"JSON\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n auditLogs:\n type: databricks:MwsLogDelivery\n name: audit_logs\n properties:\n accountId: ${databricksAccountId}\n credentialsId: ${logWriter.credentialsId}\n storageConfigurationId: ${logBucket.storageConfigurationId}\n deliveryPathPrefix: audit-logs\n configName: Audit Logs\n logType: AUDIT_LOGS\n outputFormat: JSON\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsNetworks to [configure 
VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", "properties": { "accountId": { "type": "string", @@ -20262,7 +20439,7 @@ "items": { "type": "integer" }, - "description": "By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces.\n" + "description": "By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. 
If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces.\n" } }, "required": [ @@ -20330,7 +20507,7 @@ "items": { "type": "integer" }, - "description": "By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces.\n", + "description": "By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces.\n", "willReplaceOnChanges": true } }, @@ -20398,7 +20575,7 @@ "items": { "type": "integer" }, - "description": "By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. 
You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces.\n", + "description": "By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces.\n", "willReplaceOnChanges": true } }, @@ -20712,7 +20889,7 @@ } }, "databricks:index/mwsNetworks:MwsNetworks": { - "description": "## Databricks on AWS usage\n\n\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nUse this resource to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS. It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources.\n\n* Databricks must have access to at least two subnets for each workspace, with each subnet in a different Availability Zone. You cannot specify more than one Databricks workspace subnet per Availability Zone in the Create network configuration API call. 
You can have more than one subnet per Availability Zone as part of your network setup, but you can choose only one subnet per Availability Zone for the Databricks workspace.\n* Databricks assigns two IP addresses per node, one for management traffic and one for Spark applications. The total number of instances for each subnet is equal to half of the available IP addresses.\n* Each subnet must have a netmask between /17 and /25.\n* Subnets must be private.\n* Subnets must have outbound access to the public network using a aws_nat_gateway, or other similar customer-managed appliance infrastructure.\n* The NAT gateway must be set up in its subnet (public_subnets in the example below) that routes quad-zero (0.0.0.0/0) traffic to an internet gateway or other customer-managed appliance infrastructure.\n\n\u003e **Note** The NAT gateway needs only one IP address per AZ. Hence, the public subnet only needs two IP addresses. In order to limit the number of IP addresses in the public subnet, you can specify a secondary CIDR block (cidr_block_public) using the argument secondary_cidr_blocks then pass it to the public_subnets argument. Please review the [IPv4 CIDR block association restrictions](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) when choosing the secondary cidr block.\n\nPlease follow this complete runnable example \u0026 subnet for new workspaces within GCP. 
It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources.\n\n* Databricks must have access to a subnet in the same region as the workspace, of which IP range will be used to allocate your workspace’s GKE cluster nodes.\n* The subnet must have a netmask between /29 and /9.\n* Databricks must have access to 2 secondary IP ranges, one between /21 to /9 for workspace’s GKE cluster pods, and one between /27 to /16 for workspace’s GKE cluster services.\n* Subnet must have outbound access to the public network using a gcp_compute_router_nat or other similar customer-managed appliance infrastructure.\n\nPlease follow this complete runnable example]\n private_subnets = [cidrsubnet(var.cidr_block, 3, 1),\n cidrsubnet(var.cidr_block, 3, 2)]\n\n default_security_group_egress = [{\n cidr_blocks = \"0.0.0.0/0\"\n }]\n\n default_security_group_ingress = [{\n description = \"Allow all internal TCP and UDP\"\n self = true\n }]\n}\n\nresource \"databricks.MwsNetworks\" \"this\" {\n provider = databricks.mws\n account_id = var.databricks_account_id\n network_name = \"${local.prefix}-network\"\n security_group_ids = [module.vpc.default_security_group_id]\n subnet_ids = module.vpc.private_subnets\n vpc_id = module.vpc.vpc_id\n}\n```\n\nIn order to create a VPC [that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) you would need to add the `vpc_endpoint_id` Attributes from [mws_vpc_endpoint](mws_vpc_endpoint.md) resources into the [databricks_mws_networks](databricks_mws_networks.md) resource. 
For example:\n\n```hcl\nresource \"databricks_mws_networks\" \"this\" {\n provider = databricks.mws\n account_id = var.databricks_account_id\n network_name = \"${local.prefix}-network\"\n security_group_ids = [module.vpc.default_security_group_id]\n subnet_ids = module.vpc.private_subnets\n vpc_id = module.vpc.vpc_id\n vpc_endpoints {\n dataplane_relay = [databricks_mws_vpc_endpoint.relay.vpc_endpoint_id]\n rest_api = [databricks_mws_vpc_endpoint.workspace.vpc_endpoint_id]\n }\n depends_on = [aws_vpc_endpoint.workspace, aws_vpc_endpoint.relay]\n}\n```\n\n### Creating a Databricks on GCP workspace\n\n```hcl\nvariable \"databricks_account_id\" {\n description = \"Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\"\n}\n\nresource \"google_compute_network\" \"dbx_private_vpc\" {\n project = var.google_project\n name = \"tf-network-${random_string.suffix.result}\"\n auto_create_subnetworks = false\n}\n\nresource \"google_compute_subnetwork\" \"network-with-private-secondary-ip-ranges\" {\n name = \"test-dbx-${random_string.suffix.result}\"\n ip_cidr_range = \"10.0.0.0/16\"\n region = \"us-central1\"\n network = google_compute_network.dbx_private_vpc.id\n secondary_ip_range {\n range_name = \"pods\"\n ip_cidr_range = \"10.1.0.0/16\"\n }\n secondary_ip_range {\n range_name = \"svc\"\n ip_cidr_range = \"10.2.0.0/20\"\n }\n private_ip_google_access = true\n}\n\nresource \"google_compute_router\" \"router\" {\n name = \"my-router-${random_string.suffix.result}\"\n region = google_compute_subnetwork.network-with-private-secondary-ip-ranges.region\n network = google_compute_network.dbx_private_vpc.id\n}\n\nresource \"google_compute_router_nat\" \"nat\" {\n name = \"my-router-nat-${random_string.suffix.result}\"\n router = google_compute_router.router.name\n region = google_compute_router.router.region\n nat_ip_allocate_option = \"AUTO_ONLY\"\n source_subnetwork_ip_ranges_to_nat = \"ALL_SUBNETWORKS_ALL_IP_RANGES\"\n}\n\nresource 
\"databricks_mws_networks\" \"this\" {\n account_id = var.databricks_account_id\n network_name = \"test-demo-${random_string.suffix.result}\"\n gcp_network_info {\n network_project_id = var.google_project\n vpc_id = google_compute_network.dbx_private_vpc.name\n subnet_id = google_compute_subnetwork.network_with_private_secondary_ip_ranges.name\n subnet_region = google_compute_subnetwork.network_with_private_secondary_ip_ranges.region\n pod_ip_range_name = \"pods\"\n service_ip_range_name = \"svc\"\n }\n}\n```\n\nIn order to create a VPC [that leverages GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) you would need to add the `vpc_endpoint_id` Attributes from mws_vpc_endpoint resources into the databricks.MwsNetworks resource. For example:\n\n```hcl\nresource \"databricks_mws_networks\" \"this\" {\n account_id = var.databricks_account_id\n network_name = \"test-demo-${random_string.suffix.result}\"\n gcp_network_info {\n network_project_id = var.google_project\n vpc_id = google_compute_network.dbx_private_vpc.name\n subnet_id = google_compute_subnetwork.network_with_private_secondary_ip_ranges.name\n subnet_region = google_compute_subnetwork.network_with_private_secondary_ip_ranges.region\n pod_ip_range_name = \"pods\"\n service_ip_range_name = \"svc\"\n }\n vpc_endpoints {\n dataplane_relay = [databricks_mws_vpc_endpoint.relay.vpc_endpoint_id]\n rest_api = [databricks_mws_vpc_endpoint.workspace.vpc_endpoint_id]\n }\n}\n```\n\n## Modifying networks on running workspaces (AWS only)\n\nDue to specifics of platform APIs, changing any attribute of network configuration would cause `databricks.MwsNetworks` to be re-created - deleted \u0026 added again with special case for running workspaces. 
Once network configuration is attached to a running databricks_mws_workspaces, you cannot delete it and `pulumi up` would result in `INVALID_STATE: Unable to delete, Network is being used by active workspace X` error. In order to modify any attributes of a network, you have to perform three different `pulumi up` steps:\n\n1. Create a new `databricks.MwsNetworks` resource.\n2. Update the `databricks.MwsWorkspaces` to point to the new `network_id`.\n3. Delete the old `databricks.MwsNetworks` resource.\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* Provisioning Databricks on AWS with PrivateLink guide.\n* Provisioning AWS Databricks E2 with a Hub \u0026 Spoke firewall for data exfiltration protection guide.\n* Provisioning Databricks on GCP guide.\n* Provisioning Databricks workspaces on GCP with Private Service Connect guide.\n* databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration.\n* databricks.MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html).\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", + "description": "## Databricks on AWS usage\n\n\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nUse this resource to [configure 
VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS. It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources.\n\n* Databricks must have access to at least two subnets for each workspace, with each subnet in a different Availability Zone. You cannot specify more than one Databricks workspace subnet per Availability Zone in the Create network configuration API call. You can have more than one subnet per Availability Zone as part of your network setup, but you can choose only one subnet per Availability Zone for the Databricks workspace.\n* Databricks assigns two IP addresses per node, one for management traffic and one for Spark applications. The total number of instances for each subnet is equal to half of the available IP addresses.\n* Each subnet must have a netmask between /17 and /25.\n* Subnets must be private.\n* Subnets must have outbound access to the public network using an aws_nat_gateway, or other similar customer-managed appliance infrastructure.\n* The NAT gateway must be set up in its subnet (public_subnets in the example below) that routes quad-zero (0.0.0.0/0) traffic to an internet gateway or other customer-managed appliance infrastructure.\n\n\u003e **Note** The NAT gateway needs only one IP address per AZ. Hence, the public subnet only needs two IP addresses. In order to limit the number of IP addresses in the public subnet, you can specify a secondary CIDR block (cidr_block_public) using the argument secondary_cidr_blocks then pass it to the public_subnets argument. Please review the [IPv4 CIDR block association restrictions](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) when choosing the secondary cidr block.\n\nPlease follow this complete runnable example.\n\n## Databricks on GCP usage\n\nUse this resource to configure VPC \u0026 subnet for new workspaces within GCP. 
It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources.\n\n* Databricks must have access to a subnet in the same region as the workspace, of which IP range will be used to allocate your workspace’s GKE cluster nodes.\n* The subnet must have a netmask between /29 and /9.\n* Databricks must have access to 2 secondary IP ranges, one between /21 to /9 for workspace’s GKE cluster pods, and one between /27 to /16 for workspace’s GKE cluster services.\n* Subnet must have outbound access to the public network using a gcp_compute_router_nat or other similar customer-managed appliance infrastructure.\n\nPlease follow this complete runnable example]\n private_subnets = [cidrsubnet(var.cidr_block, 3, 1),\n cidrsubnet(var.cidr_block, 3, 2)]\n\n default_security_group_egress = [{\n cidr_blocks = \"0.0.0.0/0\"\n }]\n\n default_security_group_ingress = [{\n description = \"Allow all internal TCP and UDP\"\n self = true\n }]\n}\n\nresource \"databricks.MwsNetworks\" \"this\" {\n provider = databricks.mws\n account_id = var.databricks_account_id\n network_name = \"${local.prefix}-network\"\n security_group_ids = [module.vpc.default_security_group_id]\n subnet_ids = module.vpc.private_subnets\n vpc_id = module.vpc.vpc_id\n}\n```\n\nIn order to create a VPC [that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) you would need to add the `vpc_endpoint_id` Attributes from [mws_vpc_endpoint](mws_vpc_endpoint.md) resources into the [databricks_mws_networks](databricks_mws_networks.md) resource. 
For example:\n\n```hcl\nresource \"databricks_mws_networks\" \"this\" {\n provider = databricks.mws\n account_id = var.databricks_account_id\n network_name = \"${local.prefix}-network\"\n security_group_ids = [module.vpc.default_security_group_id]\n subnet_ids = module.vpc.private_subnets\n vpc_id = module.vpc.vpc_id\n vpc_endpoints {\n dataplane_relay = [databricks_mws_vpc_endpoint.relay.vpc_endpoint_id]\n rest_api = [databricks_mws_vpc_endpoint.workspace.vpc_endpoint_id]\n }\n depends_on = [aws_vpc_endpoint.workspace, aws_vpc_endpoint.relay]\n}\n```\n\n### Creating a Databricks on GCP workspace\n\n```hcl\nvariable \"databricks_account_id\" {\n description = \"Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\"\n}\n\nresource \"google_compute_network\" \"dbx_private_vpc\" {\n project = var.google_project\n name = \"tf-network-${random_string.suffix.result}\"\n auto_create_subnetworks = false\n}\n\nresource \"google_compute_subnetwork\" \"network-with-private-secondary-ip-ranges\" {\n name = \"test-dbx-${random_string.suffix.result}\"\n ip_cidr_range = \"10.0.0.0/16\"\n region = \"us-central1\"\n network = google_compute_network.dbx_private_vpc.id\n secondary_ip_range {\n range_name = \"pods\"\n ip_cidr_range = \"10.1.0.0/16\"\n }\n secondary_ip_range {\n range_name = \"svc\"\n ip_cidr_range = \"10.2.0.0/20\"\n }\n private_ip_google_access = true\n}\n\nresource \"google_compute_router\" \"router\" {\n name = \"my-router-${random_string.suffix.result}\"\n region = google_compute_subnetwork.network-with-private-secondary-ip-ranges.region\n network = google_compute_network.dbx_private_vpc.id\n}\n\nresource \"google_compute_router_nat\" \"nat\" {\n name = \"my-router-nat-${random_string.suffix.result}\"\n router = google_compute_router.router.name\n region = google_compute_router.router.region\n nat_ip_allocate_option = \"AUTO_ONLY\"\n source_subnetwork_ip_ranges_to_nat = \"ALL_SUBNETWORKS_ALL_IP_RANGES\"\n}\n\nresource 
\"databricks_mws_networks\" \"this\" {\n account_id = var.databricks_account_id\n network_name = \"test-demo-${random_string.suffix.result}\"\n gcp_network_info {\n network_project_id = var.google_project\n vpc_id = google_compute_network.dbx_private_vpc.name\n subnet_id = google_compute_subnetwork.network_with_private_secondary_ip_ranges.name\n subnet_region = google_compute_subnetwork.network_with_private_secondary_ip_ranges.region\n pod_ip_range_name = \"pods\"\n service_ip_range_name = \"svc\"\n }\n}\n```\n\nIn order to create a VPC [that leverages GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) you would need to add the `vpc_endpoint_id` Attributes from mws_vpc_endpoint resources into the databricks.MwsNetworks resource. For example:\n\n```hcl\nresource \"databricks_mws_networks\" \"this\" {\n account_id = var.databricks_account_id\n network_name = \"test-demo-${random_string.suffix.result}\"\n gcp_network_info {\n network_project_id = var.google_project\n vpc_id = google_compute_network.dbx_private_vpc.name\n subnet_id = google_compute_subnetwork.network_with_private_secondary_ip_ranges.name\n subnet_region = google_compute_subnetwork.network_with_private_secondary_ip_ranges.region\n pod_ip_range_name = \"pods\"\n service_ip_range_name = \"svc\"\n }\n vpc_endpoints {\n dataplane_relay = [databricks_mws_vpc_endpoint.relay.vpc_endpoint_id]\n rest_api = [databricks_mws_vpc_endpoint.workspace.vpc_endpoint_id]\n }\n}\n```\n\n## Modifying networks on running workspaces (AWS only)\n\nDue to specifics of platform APIs, changing any attribute of network configuration would cause `databricks.MwsNetworks` to be re-created - deleted \u0026 added again with special case for running workspaces. 
Once network configuration is attached to a running databricks_mws_workspaces, you cannot delete it and `pulumi up` would result in `INVALID_STATE: Unable to delete, Network is being used by active workspace X` error. In order to modify any attributes of a network, you have to perform three different `pulumi up` steps:\n\n1. Create a new `databricks.MwsNetworks` resource.\n2. Update the `databricks.MwsWorkspaces` to point to the new `network_id`.\n3. Delete the old `databricks.MwsNetworks` resource.\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* Provisioning Databricks on AWS with Private Link guide.\n* Provisioning AWS Databricks workspaces with a Hub \u0026 Spoke firewall for data exfiltration protection guide.\n* Provisioning Databricks on GCP guide.\n* Provisioning Databricks workspaces on GCP with Private Service Connect guide.\n* databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration.\n* databricks.MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html).\n* databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", "properties": { "accountId": { "type": "string", @@ -20995,7 +21172,7 @@ } }, "databricks:index/mwsPrivateAccessSettings:MwsPrivateAccessSettings": { - "description": "Allows you to create a Private Access Setting resource that can be used as part of a 
databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html)\n\nIt is strongly recommended that customers read the [Enable AWS Private Link](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) [Enable GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) documentation before trying to leverage this resource.\n\n## Databricks on AWS usage\n\n\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst pas = new databricks.MwsPrivateAccessSettings(\"pas\", {\n accountId: databricksAccountId,\n privateAccessSettingsName: `Private Access Settings for ${prefix}`,\n region: region,\n publicAccessEnabled: true,\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\npas = databricks.MwsPrivateAccessSettings(\"pas\",\n account_id=databricks_account_id,\n private_access_settings_name=f\"Private Access Settings for {prefix}\",\n region=region,\n public_access_enabled=True)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pas = new Databricks.MwsPrivateAccessSettings(\"pas\", new()\n {\n AccountId = databricksAccountId,\n PrivateAccessSettingsName = $\"Private Access Settings for {prefix}\",\n Region = region,\n PublicAccessEnabled = true,\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsPrivateAccessSettings(ctx, \"pas\", \u0026databricks.MwsPrivateAccessSettingsArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tPrivateAccessSettingsName: pulumi.String(fmt.Sprintf(\"Private Access Settings for %v\", prefix)),\n\t\t\tRegion: pulumi.Any(region),\n\t\t\tPublicAccessEnabled: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsPrivateAccessSettings;\nimport com.pulumi.databricks.MwsPrivateAccessSettingsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pas = new MwsPrivateAccessSettings(\"pas\", MwsPrivateAccessSettingsArgs.builder()\n .accountId(databricksAccountId)\n .privateAccessSettingsName(String.format(\"Private Access Settings for %s\", prefix))\n .region(region)\n .publicAccessEnabled(true)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pas:\n type: databricks:MwsPrivateAccessSettings\n properties:\n accountId: ${databricksAccountId}\n privateAccessSettingsName: Private Access Settings for ${prefix}\n region: ${region}\n publicAccessEnabled: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nThe `databricks_mws_private_access_settings.pas.private_access_settings_id` can then be used as part of a databricks.MwsWorkspaces resource:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as 
databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.MwsWorkspaces(\"this\", {\n awsRegion: region,\n workspaceName: prefix,\n credentialsId: thisDatabricksMwsCredentials.credentialsId,\n storageConfigurationId: thisDatabricksMwsStorageConfigurations.storageConfigurationId,\n networkId: thisDatabricksMwsNetworks.networkId,\n privateAccessSettingsId: pas.privateAccessSettingsId,\n pricingTier: \"ENTERPRISE\",\n}, {\n dependsOn: [thisDatabricksMwsNetworks],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.MwsWorkspaces(\"this\",\n aws_region=region,\n workspace_name=prefix,\n credentials_id=this_databricks_mws_credentials[\"credentialsId\"],\n storage_configuration_id=this_databricks_mws_storage_configurations[\"storageConfigurationId\"],\n network_id=this_databricks_mws_networks[\"networkId\"],\n private_access_settings_id=pas[\"privateAccessSettingsId\"],\n pricing_tier=\"ENTERPRISE\",\n opts = pulumi.ResourceOptions(depends_on=[this_databricks_mws_networks]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.MwsWorkspaces(\"this\", new()\n {\n AwsRegion = region,\n WorkspaceName = prefix,\n CredentialsId = thisDatabricksMwsCredentials.CredentialsId,\n StorageConfigurationId = thisDatabricksMwsStorageConfigurations.StorageConfigurationId,\n NetworkId = thisDatabricksMwsNetworks.NetworkId,\n PrivateAccessSettingsId = pas.PrivateAccessSettingsId,\n PricingTier = \"ENTERPRISE\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n thisDatabricksMwsNetworks,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsWorkspaces(ctx, \"this\", 
\u0026databricks.MwsWorkspacesArgs{\n\t\t\tAwsRegion: pulumi.Any(region),\n\t\t\tWorkspaceName: pulumi.Any(prefix),\n\t\t\tCredentialsId: pulumi.Any(thisDatabricksMwsCredentials.CredentialsId),\n\t\t\tStorageConfigurationId: pulumi.Any(thisDatabricksMwsStorageConfigurations.StorageConfigurationId),\n\t\t\tNetworkId: pulumi.Any(thisDatabricksMwsNetworks.NetworkId),\n\t\t\tPrivateAccessSettingsId: pulumi.Any(pas.PrivateAccessSettingsId),\n\t\t\tPricingTier: pulumi.String(\"ENTERPRISE\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tthisDatabricksMwsNetworks,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsWorkspaces;\nimport com.pulumi.databricks.MwsWorkspacesArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new MwsWorkspaces(\"this\", MwsWorkspacesArgs.builder()\n .awsRegion(region)\n .workspaceName(prefix)\n .credentialsId(thisDatabricksMwsCredentials.credentialsId())\n .storageConfigurationId(thisDatabricksMwsStorageConfigurations.storageConfigurationId())\n .networkId(thisDatabricksMwsNetworks.networkId())\n .privateAccessSettingsId(pas.privateAccessSettingsId())\n .pricingTier(\"ENTERPRISE\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(thisDatabricksMwsNetworks)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:MwsWorkspaces\n properties:\n awsRegion: ${region}\n workspaceName: ${prefix}\n credentialsId: ${thisDatabricksMwsCredentials.credentialsId}\n storageConfigurationId: 
${thisDatabricksMwsStorageConfigurations.storageConfigurationId}\n networkId: ${thisDatabricksMwsNetworks.networkId}\n privateAccessSettingsId: ${pas.privateAccessSettingsId}\n pricingTier: ENTERPRISE\n options:\n dependson:\n - ${thisDatabricksMwsNetworks}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Databricks on GCP usage\n\n\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.gcp.databricks.com\"` and use `provider = databricks.mws`\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.MwsWorkspaces(\"this\", {\n workspaceName: \"gcp-workspace\",\n location: subnetRegion,\n cloudResourceContainer: {\n gcp: {\n projectId: googleProject,\n },\n },\n gkeConfig: {\n connectivityType: \"PRIVATE_NODE_PUBLIC_MASTER\",\n masterIpRange: \"10.3.0.0/28\",\n },\n networkId: thisDatabricksMwsNetworks.networkId,\n privateAccessSettingsId: pas.privateAccessSettingsId,\n pricingTier: \"PREMIUM\",\n}, {\n dependsOn: [thisDatabricksMwsNetworks],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.MwsWorkspaces(\"this\",\n workspace_name=\"gcp-workspace\",\n location=subnet_region,\n cloud_resource_container=databricks.MwsWorkspacesCloudResourceContainerArgs(\n gcp=databricks.MwsWorkspacesCloudResourceContainerGcpArgs(\n project_id=google_project,\n ),\n ),\n gke_config=databricks.MwsWorkspacesGkeConfigArgs(\n connectivity_type=\"PRIVATE_NODE_PUBLIC_MASTER\",\n master_ip_range=\"10.3.0.0/28\",\n ),\n network_id=this_databricks_mws_networks[\"networkId\"],\n private_access_settings_id=pas[\"privateAccessSettingsId\"],\n pricing_tier=\"PREMIUM\",\n opts = pulumi.ResourceOptions(depends_on=[this_databricks_mws_networks]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await 
Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.MwsWorkspaces(\"this\", new()\n {\n WorkspaceName = \"gcp-workspace\",\n Location = subnetRegion,\n CloudResourceContainer = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerArgs\n {\n Gcp = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerGcpArgs\n {\n ProjectId = googleProject,\n },\n },\n GkeConfig = new Databricks.Inputs.MwsWorkspacesGkeConfigArgs\n {\n ConnectivityType = \"PRIVATE_NODE_PUBLIC_MASTER\",\n MasterIpRange = \"10.3.0.0/28\",\n },\n NetworkId = thisDatabricksMwsNetworks.NetworkId,\n PrivateAccessSettingsId = pas.PrivateAccessSettingsId,\n PricingTier = \"PREMIUM\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n thisDatabricksMwsNetworks,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsWorkspaces(ctx, \"this\", \u0026databricks.MwsWorkspacesArgs{\n\t\t\tWorkspaceName: pulumi.String(\"gcp-workspace\"),\n\t\t\tLocation: pulumi.Any(subnetRegion),\n\t\t\tCloudResourceContainer: \u0026databricks.MwsWorkspacesCloudResourceContainerArgs{\n\t\t\t\tGcp: \u0026databricks.MwsWorkspacesCloudResourceContainerGcpArgs{\n\t\t\t\t\tProjectId: pulumi.Any(googleProject),\n\t\t\t\t},\n\t\t\t},\n\t\t\tGkeConfig: \u0026databricks.MwsWorkspacesGkeConfigArgs{\n\t\t\t\tConnectivityType: pulumi.String(\"PRIVATE_NODE_PUBLIC_MASTER\"),\n\t\t\t\tMasterIpRange: pulumi.String(\"10.3.0.0/28\"),\n\t\t\t},\n\t\t\tNetworkId: pulumi.Any(thisDatabricksMwsNetworks.NetworkId),\n\t\t\tPrivateAccessSettingsId: pulumi.Any(pas.PrivateAccessSettingsId),\n\t\t\tPricingTier: pulumi.String(\"PREMIUM\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tthisDatabricksMwsNetworks,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsWorkspaces;\nimport com.pulumi.databricks.MwsWorkspacesArgs;\nimport com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerArgs;\nimport com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerGcpArgs;\nimport com.pulumi.databricks.inputs.MwsWorkspacesGkeConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new MwsWorkspaces(\"this\", MwsWorkspacesArgs.builder()\n .workspaceName(\"gcp-workspace\")\n .location(subnetRegion)\n .cloudResourceContainer(MwsWorkspacesCloudResourceContainerArgs.builder()\n .gcp(MwsWorkspacesCloudResourceContainerGcpArgs.builder()\n .projectId(googleProject)\n .build())\n .build())\n .gkeConfig(MwsWorkspacesGkeConfigArgs.builder()\n .connectivityType(\"PRIVATE_NODE_PUBLIC_MASTER\")\n .masterIpRange(\"10.3.0.0/28\")\n .build())\n .networkId(thisDatabricksMwsNetworks.networkId())\n .privateAccessSettingsId(pas.privateAccessSettingsId())\n .pricingTier(\"PREMIUM\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(thisDatabricksMwsNetworks)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:MwsWorkspaces\n properties:\n workspaceName: gcp-workspace\n location: ${subnetRegion}\n cloudResourceContainer:\n gcp:\n projectId: ${googleProject}\n gkeConfig:\n connectivityType: PRIVATE_NODE_PUBLIC_MASTER\n masterIpRange: 10.3.0.0/28\n networkId: ${thisDatabricksMwsNetworks.networkId}\n privateAccessSettingsId: ${pas.privateAccessSettingsId}\n pricingTier: PREMIUM\n options:\n dependson:\n - ${thisDatabricksMwsNetworks}\n```\n\u003c!--End 
PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* Provisioning Databricks on AWS with PrivateLink guide.\n* Provisioning AWS Databricks E2 with a Hub \u0026 Spoke firewall for data exfiltration protection guide.\n* Provisioning Databricks workspaces on GCP with Private Service Connect guide.\n* databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration.\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", + "description": "Allows you to create a Private Access Setting resource that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html)\n\nIt is strongly recommended that customers read the [Enable AWS Private Link](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) [Enable GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html) documentation before trying to leverage this resource.\n\n## Databricks on AWS usage\n\n\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\n\u003c!--Start PulumiCodeChooser 
--\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst pas = new databricks.MwsPrivateAccessSettings(\"pas\", {\n accountId: databricksAccountId,\n privateAccessSettingsName: `Private Access Settings for ${prefix}`,\n region: region,\n publicAccessEnabled: true,\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\npas = databricks.MwsPrivateAccessSettings(\"pas\",\n account_id=databricks_account_id,\n private_access_settings_name=f\"Private Access Settings for {prefix}\",\n region=region,\n public_access_enabled=True)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pas = new Databricks.MwsPrivateAccessSettings(\"pas\", new()\n {\n AccountId = databricksAccountId,\n PrivateAccessSettingsName = $\"Private Access Settings for {prefix}\",\n Region = region,\n PublicAccessEnabled = true,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsPrivateAccessSettings(ctx, \"pas\", \u0026databricks.MwsPrivateAccessSettingsArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tPrivateAccessSettingsName: pulumi.String(fmt.Sprintf(\"Private Access Settings for %v\", prefix)),\n\t\t\tRegion: pulumi.Any(region),\n\t\t\tPublicAccessEnabled: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsPrivateAccessSettings;\nimport com.pulumi.databricks.MwsPrivateAccessSettingsArgs;\nimport java.util.List;\nimport 
java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pas = new MwsPrivateAccessSettings(\"pas\", MwsPrivateAccessSettingsArgs.builder()\n .accountId(databricksAccountId)\n .privateAccessSettingsName(String.format(\"Private Access Settings for %s\", prefix))\n .region(region)\n .publicAccessEnabled(true)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pas:\n type: databricks:MwsPrivateAccessSettings\n properties:\n accountId: ${databricksAccountId}\n privateAccessSettingsName: Private Access Settings for ${prefix}\n region: ${region}\n publicAccessEnabled: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nThe `databricks_mws_private_access_settings.pas.private_access_settings_id` can then be used as part of a databricks.MwsWorkspaces resource:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.MwsWorkspaces(\"this\", {\n awsRegion: region,\n workspaceName: prefix,\n credentialsId: thisDatabricksMwsCredentials.credentialsId,\n storageConfigurationId: thisDatabricksMwsStorageConfigurations.storageConfigurationId,\n networkId: thisDatabricksMwsNetworks.networkId,\n privateAccessSettingsId: pas.privateAccessSettingsId,\n pricingTier: \"ENTERPRISE\",\n}, {\n dependsOn: [thisDatabricksMwsNetworks],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.MwsWorkspaces(\"this\",\n aws_region=region,\n workspace_name=prefix,\n credentials_id=this_databricks_mws_credentials[\"credentialsId\"],\n storage_configuration_id=this_databricks_mws_storage_configurations[\"storageConfigurationId\"],\n network_id=this_databricks_mws_networks[\"networkId\"],\n 
private_access_settings_id=pas[\"privateAccessSettingsId\"],\n pricing_tier=\"ENTERPRISE\",\n opts = pulumi.ResourceOptions(depends_on=[this_databricks_mws_networks]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.MwsWorkspaces(\"this\", new()\n {\n AwsRegion = region,\n WorkspaceName = prefix,\n CredentialsId = thisDatabricksMwsCredentials.CredentialsId,\n StorageConfigurationId = thisDatabricksMwsStorageConfigurations.StorageConfigurationId,\n NetworkId = thisDatabricksMwsNetworks.NetworkId,\n PrivateAccessSettingsId = pas.PrivateAccessSettingsId,\n PricingTier = \"ENTERPRISE\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n thisDatabricksMwsNetworks,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsWorkspaces(ctx, \"this\", \u0026databricks.MwsWorkspacesArgs{\n\t\t\tAwsRegion: pulumi.Any(region),\n\t\t\tWorkspaceName: pulumi.Any(prefix),\n\t\t\tCredentialsId: pulumi.Any(thisDatabricksMwsCredentials.CredentialsId),\n\t\t\tStorageConfigurationId: pulumi.Any(thisDatabricksMwsStorageConfigurations.StorageConfigurationId),\n\t\t\tNetworkId: pulumi.Any(thisDatabricksMwsNetworks.NetworkId),\n\t\t\tPrivateAccessSettingsId: pulumi.Any(pas.PrivateAccessSettingsId),\n\t\t\tPricingTier: pulumi.String(\"ENTERPRISE\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tthisDatabricksMwsNetworks,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsWorkspaces;\nimport 
com.pulumi.databricks.MwsWorkspacesArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new MwsWorkspaces(\"this\", MwsWorkspacesArgs.builder()\n .awsRegion(region)\n .workspaceName(prefix)\n .credentialsId(thisDatabricksMwsCredentials.credentialsId())\n .storageConfigurationId(thisDatabricksMwsStorageConfigurations.storageConfigurationId())\n .networkId(thisDatabricksMwsNetworks.networkId())\n .privateAccessSettingsId(pas.privateAccessSettingsId())\n .pricingTier(\"ENTERPRISE\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(thisDatabricksMwsNetworks)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:MwsWorkspaces\n properties:\n awsRegion: ${region}\n workspaceName: ${prefix}\n credentialsId: ${thisDatabricksMwsCredentials.credentialsId}\n storageConfigurationId: ${thisDatabricksMwsStorageConfigurations.storageConfigurationId}\n networkId: ${thisDatabricksMwsNetworks.networkId}\n privateAccessSettingsId: ${pas.privateAccessSettingsId}\n pricingTier: ENTERPRISE\n options:\n dependson:\n - ${thisDatabricksMwsNetworks}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Databricks on GCP usage\n\n\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.gcp.databricks.com\"` and use `provider = databricks.mws`\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.MwsWorkspaces(\"this\", {\n workspaceName: \"gcp-workspace\",\n location: subnetRegion,\n cloudResourceContainer: {\n gcp: {\n projectId: googleProject,\n },\n },\n gkeConfig: {\n connectivityType: 
\"PRIVATE_NODE_PUBLIC_MASTER\",\n masterIpRange: \"10.3.0.0/28\",\n },\n networkId: thisDatabricksMwsNetworks.networkId,\n privateAccessSettingsId: pas.privateAccessSettingsId,\n pricingTier: \"PREMIUM\",\n}, {\n dependsOn: [thisDatabricksMwsNetworks],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.MwsWorkspaces(\"this\",\n workspace_name=\"gcp-workspace\",\n location=subnet_region,\n cloud_resource_container=databricks.MwsWorkspacesCloudResourceContainerArgs(\n gcp=databricks.MwsWorkspacesCloudResourceContainerGcpArgs(\n project_id=google_project,\n ),\n ),\n gke_config=databricks.MwsWorkspacesGkeConfigArgs(\n connectivity_type=\"PRIVATE_NODE_PUBLIC_MASTER\",\n master_ip_range=\"10.3.0.0/28\",\n ),\n network_id=this_databricks_mws_networks[\"networkId\"],\n private_access_settings_id=pas[\"privateAccessSettingsId\"],\n pricing_tier=\"PREMIUM\",\n opts = pulumi.ResourceOptions(depends_on=[this_databricks_mws_networks]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.MwsWorkspaces(\"this\", new()\n {\n WorkspaceName = \"gcp-workspace\",\n Location = subnetRegion,\n CloudResourceContainer = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerArgs\n {\n Gcp = new Databricks.Inputs.MwsWorkspacesCloudResourceContainerGcpArgs\n {\n ProjectId = googleProject,\n },\n },\n GkeConfig = new Databricks.Inputs.MwsWorkspacesGkeConfigArgs\n {\n ConnectivityType = \"PRIVATE_NODE_PUBLIC_MASTER\",\n MasterIpRange = \"10.3.0.0/28\",\n },\n NetworkId = thisDatabricksMwsNetworks.NetworkId,\n PrivateAccessSettingsId = pas.PrivateAccessSettingsId,\n PricingTier = \"PREMIUM\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n thisDatabricksMwsNetworks,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewMwsWorkspaces(ctx, \"this\", \u0026databricks.MwsWorkspacesArgs{\n\t\t\tWorkspaceName: pulumi.String(\"gcp-workspace\"),\n\t\t\tLocation: pulumi.Any(subnetRegion),\n\t\t\tCloudResourceContainer: \u0026databricks.MwsWorkspacesCloudResourceContainerArgs{\n\t\t\t\tGcp: \u0026databricks.MwsWorkspacesCloudResourceContainerGcpArgs{\n\t\t\t\t\tProjectId: pulumi.Any(googleProject),\n\t\t\t\t},\n\t\t\t},\n\t\t\tGkeConfig: \u0026databricks.MwsWorkspacesGkeConfigArgs{\n\t\t\t\tConnectivityType: pulumi.String(\"PRIVATE_NODE_PUBLIC_MASTER\"),\n\t\t\t\tMasterIpRange: pulumi.String(\"10.3.0.0/28\"),\n\t\t\t},\n\t\t\tNetworkId: pulumi.Any(thisDatabricksMwsNetworks.NetworkId),\n\t\t\tPrivateAccessSettingsId: pulumi.Any(pas.PrivateAccessSettingsId),\n\t\t\tPricingTier: pulumi.String(\"PREMIUM\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tthisDatabricksMwsNetworks,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsWorkspaces;\nimport com.pulumi.databricks.MwsWorkspacesArgs;\nimport com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerArgs;\nimport com.pulumi.databricks.inputs.MwsWorkspacesCloudResourceContainerGcpArgs;\nimport com.pulumi.databricks.inputs.MwsWorkspacesGkeConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new MwsWorkspaces(\"this\", 
MwsWorkspacesArgs.builder()\n .workspaceName(\"gcp-workspace\")\n .location(subnetRegion)\n .cloudResourceContainer(MwsWorkspacesCloudResourceContainerArgs.builder()\n .gcp(MwsWorkspacesCloudResourceContainerGcpArgs.builder()\n .projectId(googleProject)\n .build())\n .build())\n .gkeConfig(MwsWorkspacesGkeConfigArgs.builder()\n .connectivityType(\"PRIVATE_NODE_PUBLIC_MASTER\")\n .masterIpRange(\"10.3.0.0/28\")\n .build())\n .networkId(thisDatabricksMwsNetworks.networkId())\n .privateAccessSettingsId(pas.privateAccessSettingsId())\n .pricingTier(\"PREMIUM\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(thisDatabricksMwsNetworks)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:MwsWorkspaces\n properties:\n workspaceName: gcp-workspace\n location: ${subnetRegion}\n cloudResourceContainer:\n gcp:\n projectId: ${googleProject}\n gkeConfig:\n connectivityType: PRIVATE_NODE_PUBLIC_MASTER\n masterIpRange: 10.3.0.0/28\n networkId: ${thisDatabricksMwsNetworks.networkId}\n privateAccessSettingsId: ${pas.privateAccessSettingsId}\n pricingTier: PREMIUM\n options:\n dependson:\n - ${thisDatabricksMwsNetworks}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* Provisioning Databricks on AWS with Private Link guide.\n* Provisioning AWS Databricks workspaces with a Hub \u0026 Spoke firewall for data exfiltration protection guide.\n* Provisioning Databricks workspaces on GCP with Private Service Connect guide.\n* databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration.\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [AWS and GCP 
workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", "properties": { "accountId": { "type": "string", @@ -21111,7 +21288,7 @@ } }, "databricks:index/mwsStorageConfigurations:MwsStorageConfigurations": { - "description": "\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nThis resource to configure root bucket new workspaces within AWS.\n\nIt is important to understand that this will require you to configure your provider separately for the multiple workspaces resources. This will point to \u003chttps://accounts.cloud.databricks.com\u003e for the HOST and it will use basic auth as that is the only authentication method available for multiple workspaces api.\n\nPlease follow this complete runnable example\n* `storage_configuration_name` - name under which this storage configuration is stored\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* Provisioning Databricks on AWS with PrivateLink guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on 
AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", + "description": "\u003e **Note** Initialize provider with `alias = \"mws\"`, `host = \"https://accounts.cloud.databricks.com\"` and use `provider = databricks.mws`\n\nThis resource to configure root bucket new workspaces within AWS.\n\nIt is important to understand that this will require you to configure your provider separately for the multiple workspaces resources. This will point to \u003chttps://accounts.cloud.databricks.com\u003e for the HOST and it will use basic auth as that is the only authentication method available for multiple workspaces api.\n\nPlease follow this complete runnable example\n* `storage_configuration_name` - name under which this storage configuration is stored\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* Provisioning Databricks on AWS with Private Link guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported.\n\n", "properties": { "accountId": { "type": "string", 
@@ -21908,6 +22085,9 @@ "$ref": "#/types/databricks:index/OnlineTableStatus:OnlineTableStatus" }, "description": "object describing status of the online table:\n" + }, + "tableServingUrl": { + "type": "string" } }, "required": [ @@ -21924,6 +22104,10 @@ "$ref": "#/types/databricks:index/OnlineTableSpec:OnlineTableSpec", "description": "object containing specification of the online table:\n", "willReplaceOnChanges": true + }, + "tableServingUrl": { + "type": "string", + "willReplaceOnChanges": true } }, "stateInputs": { @@ -21945,6 +22129,10 @@ "$ref": "#/types/databricks:index/OnlineTableStatus:OnlineTableStatus" }, "description": "object describing status of the online table:\n" + }, + "tableServingUrl": { + "type": "string", + "willReplaceOnChanges": true } }, "type": "object" @@ -24623,7 +24811,8 @@ "description": "If this access control for the entire catalog. Defaults to `false`.\n" }, "clusterId": { - "type": "string" + "type": "string", + "description": "Id of an existing databricks_cluster, otherwise resource creation will fail.\n" }, "database": { "type": "string", @@ -24664,7 +24853,8 @@ "willReplaceOnChanges": true }, "clusterId": { - "type": "string" + "type": "string", + "description": "Id of an existing databricks_cluster, otherwise resource creation will fail.\n" }, "database": { "type": "string", @@ -24707,7 +24897,8 @@ "willReplaceOnChanges": true }, "clusterId": { - "type": "string" + "type": "string", + "description": "Id of an existing databricks_cluster, otherwise resource creation will fail.\n" }, "database": { "type": "string", @@ -26658,7 +26849,7 @@ }, "functions": { "databricks:index/getAwsAssumeRolePolicy:getAwsAssumeRolePolicy": { - "description": "This data source constructs necessary AWS STS assume role policy for you.\n\n## Example Usage\n\nEnd-to-end example of provisioning Cross-account IAM role with databricks_mws_credentials:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from 
\"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst this = databricks.getAwsCrossAccountPolicy({});\nconst crossAccountPolicy = new aws.iam.Policy(\"cross_account_policy\", {\n name: `${prefix}-crossaccount-iam-policy`,\n policy: _this.then(_this =\u003e _this.json),\n});\nconst thisGetAwsAssumeRolePolicy = databricks.getAwsAssumeRolePolicy({\n externalId: databricksAccountId,\n});\nconst crossAccount = new aws.iam.Role(\"cross_account\", {\n name: `${prefix}-crossaccount-iam-role`,\n assumeRolePolicy: thisGetAwsAssumeRolePolicy.then(thisGetAwsAssumeRolePolicy =\u003e thisGetAwsAssumeRolePolicy.json),\n description: \"Grants Databricks full access to VPC resources\",\n});\nconst crossAccountRolePolicyAttachment = new aws.iam.RolePolicyAttachment(\"cross_account\", {\n policyArn: crossAccountPolicy.arn,\n role: crossAccount.name,\n});\n// required only in case of multi-workspace setup\nconst thisMwsCredentials = new databricks.MwsCredentials(\"this\", {\n accountId: databricksAccountId,\n credentialsName: `${prefix}-creds`,\n roleArn: crossAccount.arn,\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\nthis = databricks.get_aws_cross_account_policy()\ncross_account_policy = aws.iam.Policy(\"cross_account_policy\",\n name=f\"{prefix}-crossaccount-iam-policy\",\n policy=this.json)\nthis_get_aws_assume_role_policy = databricks.get_aws_assume_role_policy(external_id=databricks_account_id)\ncross_account = aws.iam.Role(\"cross_account\",\n 
name=f\"{prefix}-crossaccount-iam-role\",\n assume_role_policy=this_get_aws_assume_role_policy.json,\n description=\"Grants Databricks full access to VPC resources\")\ncross_account_role_policy_attachment = aws.iam.RolePolicyAttachment(\"cross_account\",\n policy_arn=cross_account_policy.arn,\n role=cross_account.name)\n# required only in case of multi-workspace setup\nthis_mws_credentials = databricks.MwsCredentials(\"this\",\n account_id=databricks_account_id,\n credentials_name=f\"{prefix}-creds\",\n role_arn=cross_account.arn)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var @this = Databricks.GetAwsCrossAccountPolicy.Invoke();\n\n var crossAccountPolicy = new Aws.Iam.Policy(\"cross_account_policy\", new()\n {\n Name = $\"{prefix}-crossaccount-iam-policy\",\n PolicyDocument = @this.Apply(@this =\u003e @this.Apply(getAwsCrossAccountPolicyResult =\u003e getAwsCrossAccountPolicyResult.Json)),\n });\n\n var thisGetAwsAssumeRolePolicy = Databricks.GetAwsAssumeRolePolicy.Invoke(new()\n {\n ExternalId = databricksAccountId,\n });\n\n var crossAccount = new Aws.Iam.Role(\"cross_account\", new()\n {\n Name = $\"{prefix}-crossaccount-iam-role\",\n AssumeRolePolicy = thisGetAwsAssumeRolePolicy.Apply(getAwsAssumeRolePolicyResult =\u003e getAwsAssumeRolePolicyResult.Json),\n Description = \"Grants Databricks full access to VPC resources\",\n });\n\n var crossAccountRolePolicyAttachment = new Aws.Iam.RolePolicyAttachment(\"cross_account\", new()\n {\n PolicyArn = crossAccountPolicy.Arn,\n Role = crossAccount.Name,\n });\n\n // required only in case of multi-workspace setup\n var thisMwsCredentials = 
new Databricks.MwsCredentials(\"this\", new()\n {\n AccountId = databricksAccountId,\n CredentialsName = $\"{prefix}-creds\",\n RoleArn = crossAccount.Arn,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\t// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\tthis, err := databricks.GetAwsCrossAccountPolicy(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcrossAccountPolicy, err := iam.NewPolicy(ctx, \"cross_account_policy\", \u0026iam.PolicyArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-crossaccount-iam-policy\", prefix)),\n\t\t\tPolicy: pulumi.String(this.Json),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tthisGetAwsAssumeRolePolicy, err := databricks.GetAwsAssumeRolePolicy(ctx, \u0026databricks.GetAwsAssumeRolePolicyArgs{\n\t\t\tExternalId: databricksAccountId,\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcrossAccount, err := iam.NewRole(ctx, \"cross_account\", \u0026iam.RoleArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-crossaccount-iam-role\", prefix)),\n\t\t\tAssumeRolePolicy: pulumi.String(thisGetAwsAssumeRolePolicy.Json),\n\t\t\tDescription: pulumi.String(\"Grants Databricks full access to VPC resources\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"cross_account\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tPolicyArn: crossAccountPolicy.Arn,\n\t\t\tRole: crossAccount.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// required only in case of multi-workspace setup\n\t\t_, 
err = databricks.NewMwsCredentials(ctx, \"this\", \u0026databricks.MwsCredentialsArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tCredentialsName: pulumi.String(fmt.Sprintf(\"%v-creds\", prefix)),\n\t\t\tRoleArn: crossAccount.Arn,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;\nimport com.pulumi.aws.iam.Policy;\nimport com.pulumi.aws.iam.PolicyArgs;\nimport com.pulumi.databricks.inputs.GetAwsAssumeRolePolicyArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport com.pulumi.aws.iam.RolePolicyAttachment;\nimport com.pulumi.aws.iam.RolePolicyAttachmentArgs;\nimport com.pulumi.databricks.MwsCredentials;\nimport com.pulumi.databricks.MwsCredentialsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var this = DatabricksFunctions.getAwsCrossAccountPolicy();\n\n var crossAccountPolicy = new Policy(\"crossAccountPolicy\", PolicyArgs.builder()\n .name(String.format(\"%s-crossaccount-iam-policy\", prefix))\n .policy(this_.json())\n .build());\n\n final var thisGetAwsAssumeRolePolicy = DatabricksFunctions.getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs.builder()\n .externalId(databricksAccountId)\n .build());\n\n var crossAccount = new Role(\"crossAccount\", RoleArgs.builder()\n .name(String.format(\"%s-crossaccount-iam-role\", prefix))\n 
.assumeRolePolicy(thisGetAwsAssumeRolePolicy.applyValue(getAwsAssumeRolePolicyResult -\u003e getAwsAssumeRolePolicyResult.json()))\n .description(\"Grants Databricks full access to VPC resources\")\n .build());\n\n var crossAccountRolePolicyAttachment = new RolePolicyAttachment(\"crossAccountRolePolicyAttachment\", RolePolicyAttachmentArgs.builder()\n .policyArn(crossAccountPolicy.arn())\n .role(crossAccount.name())\n .build());\n\n // required only in case of multi-workspace setup\n var thisMwsCredentials = new MwsCredentials(\"thisMwsCredentials\", MwsCredentialsArgs.builder()\n .accountId(databricksAccountId)\n .credentialsName(String.format(\"%s-creds\", prefix))\n .roleArn(crossAccount.arn())\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\nresources:\n crossAccountPolicy:\n type: aws:iam:Policy\n name: cross_account_policy\n properties:\n name: ${prefix}-crossaccount-iam-policy\n policy: ${this.json}\n crossAccount:\n type: aws:iam:Role\n name: cross_account\n properties:\n name: ${prefix}-crossaccount-iam-role\n assumeRolePolicy: ${thisGetAwsAssumeRolePolicy.json}\n description: Grants Databricks full access to VPC resources\n crossAccountRolePolicyAttachment:\n type: aws:iam:RolePolicyAttachment\n name: cross_account\n properties:\n policyArn: ${crossAccountPolicy.arn}\n role: ${crossAccount.name}\n # required only in case of multi-workspace setup\n thisMwsCredentials:\n type: databricks:MwsCredentials\n name: this\n properties:\n accountId: ${databricksAccountId}\n credentialsName: ${prefix}-creds\n roleArn: ${crossAccount.arn}\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsCrossAccountPolicy\n Arguments: {}\n thisGetAwsAssumeRolePolicy:\n fn::invoke:\n Function: databricks:getAwsAssumeRolePolicy\n Arguments:\n externalId: ${databricksAccountId}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning AWS 
Databricks E2 with a Hub \u0026 Spoke firewall for data exfiltration protection guide\n* databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it.\n* databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default).\n", + "description": "This data source constructs necessary AWS STS assume role policy for you.\n\n## Example Usage\n\nEnd-to-end example of provisioning Cross-account IAM role with databricks_mws_credentials:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\n// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst this = databricks.getAwsCrossAccountPolicy({});\nconst crossAccountPolicy = new aws.iam.Policy(\"cross_account_policy\", {\n name: `${prefix}-crossaccount-iam-policy`,\n policy: _this.then(_this =\u003e _this.json),\n});\nconst thisGetAwsAssumeRolePolicy = databricks.getAwsAssumeRolePolicy({\n externalId: databricksAccountId,\n});\nconst crossAccount = new aws.iam.Role(\"cross_account\", {\n name: `${prefix}-crossaccount-iam-role`,\n assumeRolePolicy: thisGetAwsAssumeRolePolicy.then(thisGetAwsAssumeRolePolicy =\u003e thisGetAwsAssumeRolePolicy.json),\n description: \"Grants Databricks full access to VPC resources\",\n});\nconst crossAccountRolePolicyAttachment = new aws.iam.RolePolicyAttachment(\"cross_account\", {\n policyArn: crossAccountPolicy.arn,\n role: crossAccount.name,\n});\n// required only in case of multi-workspace setup\nconst thisMwsCredentials = new 
databricks.MwsCredentials(\"this\", {\n accountId: databricksAccountId,\n credentialsName: `${prefix}-creds`,\n roleArn: crossAccount.arn,\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\n# Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\nthis = databricks.get_aws_cross_account_policy()\ncross_account_policy = aws.iam.Policy(\"cross_account_policy\",\n name=f\"{prefix}-crossaccount-iam-policy\",\n policy=this.json)\nthis_get_aws_assume_role_policy = databricks.get_aws_assume_role_policy(external_id=databricks_account_id)\ncross_account = aws.iam.Role(\"cross_account\",\n name=f\"{prefix}-crossaccount-iam-role\",\n assume_role_policy=this_get_aws_assume_role_policy.json,\n description=\"Grants Databricks full access to VPC resources\")\ncross_account_role_policy_attachment = aws.iam.RolePolicyAttachment(\"cross_account\",\n policy_arn=cross_account_policy.arn,\n role=cross_account.name)\n# required only in case of multi-workspace setup\nthis_mws_credentials = databricks.MwsCredentials(\"this\",\n account_id=databricks_account_id,\n credentials_name=f\"{prefix}-creds\",\n role_arn=cross_account.arn)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n // Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var @this = Databricks.GetAwsCrossAccountPolicy.Invoke();\n\n var crossAccountPolicy = new Aws.Iam.Policy(\"cross_account_policy\", new()\n {\n Name = $\"{prefix}-crossaccount-iam-policy\",\n PolicyDocument = @this.Apply(@this =\u003e 
@this.Apply(getAwsCrossAccountPolicyResult =\u003e getAwsCrossAccountPolicyResult.Json)),\n });\n\n var thisGetAwsAssumeRolePolicy = Databricks.GetAwsAssumeRolePolicy.Invoke(new()\n {\n ExternalId = databricksAccountId,\n });\n\n var crossAccount = new Aws.Iam.Role(\"cross_account\", new()\n {\n Name = $\"{prefix}-crossaccount-iam-role\",\n AssumeRolePolicy = thisGetAwsAssumeRolePolicy.Apply(getAwsAssumeRolePolicyResult =\u003e getAwsAssumeRolePolicyResult.Json),\n Description = \"Grants Databricks full access to VPC resources\",\n });\n\n var crossAccountRolePolicyAttachment = new Aws.Iam.RolePolicyAttachment(\"cross_account\", new()\n {\n PolicyArn = crossAccountPolicy.Arn,\n Role = crossAccount.Name,\n });\n\n // required only in case of multi-workspace setup\n var thisMwsCredentials = new Databricks.MwsCredentials(\"this\", new()\n {\n AccountId = databricksAccountId,\n CredentialsName = $\"{prefix}-creds\",\n RoleArn = crossAccount.Arn,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\t// Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\tthis, err := databricks.GetAwsCrossAccountPolicy(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcrossAccountPolicy, err := iam.NewPolicy(ctx, \"cross_account_policy\", \u0026iam.PolicyArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-crossaccount-iam-policy\", prefix)),\n\t\t\tPolicy: pulumi.String(this.Json),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tthisGetAwsAssumeRolePolicy, err := databricks.GetAwsAssumeRolePolicy(ctx, 
\u0026databricks.GetAwsAssumeRolePolicyArgs{\n\t\t\tExternalId: databricksAccountId,\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcrossAccount, err := iam.NewRole(ctx, \"cross_account\", \u0026iam.RoleArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-crossaccount-iam-role\", prefix)),\n\t\t\tAssumeRolePolicy: pulumi.String(thisGetAwsAssumeRolePolicy.Json),\n\t\t\tDescription: pulumi.String(\"Grants Databricks full access to VPC resources\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRolePolicyAttachment(ctx, \"cross_account\", \u0026iam.RolePolicyAttachmentArgs{\n\t\t\tPolicyArn: crossAccountPolicy.Arn,\n\t\t\tRole: crossAccount.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// required only in case of multi-workspace setup\n\t\t_, err = databricks.NewMwsCredentials(ctx, \"this\", \u0026databricks.MwsCredentialsArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tCredentialsName: pulumi.String(fmt.Sprintf(\"%v-creds\", prefix)),\n\t\t\tRoleArn: crossAccount.Arn,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;\nimport com.pulumi.aws.iam.Policy;\nimport com.pulumi.aws.iam.PolicyArgs;\nimport com.pulumi.databricks.inputs.GetAwsAssumeRolePolicyArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport com.pulumi.aws.iam.RolePolicyAttachment;\nimport com.pulumi.aws.iam.RolePolicyAttachmentArgs;\nimport com.pulumi.databricks.MwsCredentials;\nimport com.pulumi.databricks.MwsCredentialsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void 
main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var this = DatabricksFunctions.getAwsCrossAccountPolicy();\n\n var crossAccountPolicy = new Policy(\"crossAccountPolicy\", PolicyArgs.builder()\n .name(String.format(\"%s-crossaccount-iam-policy\", prefix))\n .policy(this_.json())\n .build());\n\n final var thisGetAwsAssumeRolePolicy = DatabricksFunctions.getAwsAssumeRolePolicy(GetAwsAssumeRolePolicyArgs.builder()\n .externalId(databricksAccountId)\n .build());\n\n var crossAccount = new Role(\"crossAccount\", RoleArgs.builder()\n .name(String.format(\"%s-crossaccount-iam-role\", prefix))\n .assumeRolePolicy(thisGetAwsAssumeRolePolicy.applyValue(getAwsAssumeRolePolicyResult -\u003e getAwsAssumeRolePolicyResult.json()))\n .description(\"Grants Databricks full access to VPC resources\")\n .build());\n\n var crossAccountRolePolicyAttachment = new RolePolicyAttachment(\"crossAccountRolePolicyAttachment\", RolePolicyAttachmentArgs.builder()\n .policyArn(crossAccountPolicy.arn())\n .role(crossAccount.name())\n .build());\n\n // required only in case of multi-workspace setup\n var thisMwsCredentials = new MwsCredentials(\"thisMwsCredentials\", MwsCredentialsArgs.builder()\n .accountId(databricksAccountId)\n .credentialsName(String.format(\"%s-creds\", prefix))\n .roleArn(crossAccount.arn())\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\nresources:\n crossAccountPolicy:\n type: aws:iam:Policy\n name: cross_account_policy\n properties:\n name: ${prefix}-crossaccount-iam-policy\n policy: ${this.json}\n crossAccount:\n type: aws:iam:Role\n name: cross_account\n properties:\n name: ${prefix}-crossaccount-iam-role\n assumeRolePolicy: ${thisGetAwsAssumeRolePolicy.json}\n description: Grants Databricks full access to VPC resources\n crossAccountRolePolicyAttachment:\n type: 
aws:iam:RolePolicyAttachment\n name: cross_account\n properties:\n policyArn: ${crossAccountPolicy.arn}\n role: ${crossAccount.name}\n # required only in case of multi-workspace setup\n thisMwsCredentials:\n type: databricks:MwsCredentials\n name: this\n properties:\n accountId: ${databricksAccountId}\n credentialsName: ${prefix}-creds\n roleArn: ${crossAccount.arn}\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsCrossAccountPolicy\n Arguments: {}\n thisGetAwsAssumeRolePolicy:\n fn::invoke:\n Function: databricks:getAwsAssumeRolePolicy\n Arguments:\n externalId: ${databricksAccountId}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning AWS Databricks workspaces with a Hub \u0026 Spoke firewall for data exfiltration protection guide\n* databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it.\n* databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default).\n", "inputs": { "description": "A collection of arguments for invoking getAwsAssumeRolePolicy.\n", "properties": { @@ -26727,7 +26918,7 @@ }, "databricksE2AccountId": { "type": "string", - "description": "Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket\n", + "description": "Your Databricks account ID. 
Used to generate restrictive IAM policies that will increase the security of your root bucket\n", "willReplaceOnChanges": true }, "fullAccessRole": { @@ -26774,7 +26965,7 @@ } }, "databricks:index/getAwsCrossAccountPolicy:getAwsCrossAccountPolicy": { - "description": "\u003e **Note** This data source could be only used with account-level provider!\n\nThis data source constructs necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default).\n\n## Example Usage\n\nFor more detailed usage please see databricks.getAwsAssumeRolePolicy or databricks_aws_s3_mount pages.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getAwsCrossAccountPolicy({});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.get_aws_cross_account_policy()\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = Databricks.GetAwsCrossAccountPolicy.Invoke();\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.GetAwsCrossAccountPolicy(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport 
java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var this = DatabricksFunctions.getAwsCrossAccountPolicy();\n\n }\n}\n```\n```yaml\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsCrossAccountPolicy\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning AWS Databricks E2 with a Hub \u0026 Spoke firewall for data exfiltration protection guide\n* databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy.\n* databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it.\n* databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount.\n", + "description": "\u003e **Note** This data source could be only used with account-level provider!\n\nThis data source constructs necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default).\n\n## Example Usage\n\nFor more detailed usage please see databricks.getAwsAssumeRolePolicy or databricks_aws_s3_mount pages.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getAwsCrossAccountPolicy({});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.get_aws_cross_account_policy()\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = 
Databricks.GetAwsCrossAccountPolicy.Invoke();\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.GetAwsCrossAccountPolicy(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsCrossAccountPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var this = DatabricksFunctions.getAwsCrossAccountPolicy();\n\n }\n}\n```\n```yaml\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsCrossAccountPolicy\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning AWS Databricks workspaces with a Hub \u0026 Spoke firewall for data exfiltration protection guide\n* databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy.\n* databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it.\n* databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount.\n", "inputs": { "description": "A collection of arguments for invoking getAwsCrossAccountPolicy.\n", "properties": { @@ -26855,7 +27046,7 @@ } }, "databricks:index/getAwsUnityCatalogAssumeRolePolicy:getAwsUnityCatalogAssumeRolePolicy": { 
- "description": "\u003e **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions.\n\nThis data source constructs necessary AWS Unity Catalog assume role policy for you.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getAwsUnityCatalogPolicy({\n awsAccountId: awsAccountId,\n bucketName: \"databricks-bucket\",\n roleName: `${prefix}-uc-access`,\n kmsName: \"databricks-kms\",\n});\nconst thisGetAwsUnityCatalogAssumeRolePolicy = databricks.getAwsUnityCatalogAssumeRolePolicy({\n awsAccountId: awsAccountId,\n roleName: `${prefix}-uc-access`,\n externalId: \"12345\",\n});\nconst unityMetastore = new aws.iam.Policy(\"unity_metastore\", {\n name: `${prefix}-unity-catalog-metastore-access-iam-policy`,\n policy: _this.then(_this =\u003e _this.json),\n});\nconst metastoreDataAccess = new aws.iam.Role(\"metastore_data_access\", {\n name: `${prefix}-uc-access`,\n assumeRolePolicy: passroleForUc.json,\n managedPolicyArns: [unityMetastore.arn],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nthis = databricks.get_aws_unity_catalog_policy(aws_account_id=aws_account_id,\n bucket_name=\"databricks-bucket\",\n role_name=f\"{prefix}-uc-access\",\n kms_name=\"databricks-kms\")\nthis_get_aws_unity_catalog_assume_role_policy = databricks.get_aws_unity_catalog_assume_role_policy(aws_account_id=aws_account_id,\n role_name=f\"{prefix}-uc-access\",\n external_id=\"12345\")\nunity_metastore = aws.iam.Policy(\"unity_metastore\",\n name=f\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n 
policy=this.json)\nmetastore_data_access = aws.iam.Role(\"metastore_data_access\",\n name=f\"{prefix}-uc-access\",\n assume_role_policy=passrole_for_uc[\"json\"],\n managed_policy_arns=[unity_metastore.arn])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = Databricks.GetAwsUnityCatalogPolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n BucketName = \"databricks-bucket\",\n RoleName = $\"{prefix}-uc-access\",\n KmsName = \"databricks-kms\",\n });\n\n var thisGetAwsUnityCatalogAssumeRolePolicy = Databricks.GetAwsUnityCatalogAssumeRolePolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n RoleName = $\"{prefix}-uc-access\",\n ExternalId = \"12345\",\n });\n\n var unityMetastore = new Aws.Iam.Policy(\"unity_metastore\", new()\n {\n Name = $\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n PolicyDocument = @this.Apply(@this =\u003e @this.Apply(getAwsUnityCatalogPolicyResult =\u003e getAwsUnityCatalogPolicyResult.Json)),\n });\n\n var metastoreDataAccess = new Aws.Iam.Role(\"metastore_data_access\", new()\n {\n Name = $\"{prefix}-uc-access\",\n AssumeRolePolicy = passroleForUc.Json,\n ManagedPolicyArns = new[]\n {\n unityMetastore.Arn,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthis, err := databricks.GetAwsUnityCatalogPolicy(ctx, \u0026databricks.GetAwsUnityCatalogPolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tBucketName: \"databricks-bucket\",\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tKmsName: pulumi.StringRef(\"databricks-kms\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = 
databricks.GetAwsUnityCatalogAssumeRolePolicy(ctx, \u0026databricks.GetAwsUnityCatalogAssumeRolePolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tExternalId: \"12345\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tunityMetastore, err := iam.NewPolicy(ctx, \"unity_metastore\", \u0026iam.PolicyArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-unity-catalog-metastore-access-iam-policy\", prefix)),\n\t\t\tPolicy: pulumi.String(this.Json),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRole(ctx, \"metastore_data_access\", \u0026iam.RoleArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-uc-access\", prefix)),\n\t\t\tAssumeRolePolicy: pulumi.Any(passroleForUc.Json),\n\t\t\tManagedPolicyArns: pulumi.StringArray{\n\t\t\t\tunityMetastore.Arn,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogPolicyArgs;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogAssumeRolePolicyArgs;\nimport com.pulumi.aws.iam.Policy;\nimport com.pulumi.aws.iam.PolicyArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var this = DatabricksFunctions.getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .bucketName(\"databricks-bucket\")\n .roleName(String.format(\"%s-uc-access\", prefix))\n .kmsName(\"databricks-kms\")\n .build());\n\n final var 
thisGetAwsUnityCatalogAssumeRolePolicy = DatabricksFunctions.getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .roleName(String.format(\"%s-uc-access\", prefix))\n .externalId(\"12345\")\n .build());\n\n var unityMetastore = new Policy(\"unityMetastore\", PolicyArgs.builder()\n .name(String.format(\"%s-unity-catalog-metastore-access-iam-policy\", prefix))\n .policy(this_.json())\n .build());\n\n var metastoreDataAccess = new Role(\"metastoreDataAccess\", RoleArgs.builder()\n .name(String.format(\"%s-uc-access\", prefix))\n .assumeRolePolicy(passroleForUc.json())\n .managedPolicyArns(unityMetastore.arn())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n unityMetastore:\n type: aws:iam:Policy\n name: unity_metastore\n properties:\n name: ${prefix}-unity-catalog-metastore-access-iam-policy\n policy: ${this.json}\n metastoreDataAccess:\n type: aws:iam:Role\n name: metastore_data_access\n properties:\n name: ${prefix}-uc-access\n assumeRolePolicy: ${passroleForUc.json}\n managedPolicyArns:\n - ${unityMetastore.arn}\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogPolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n bucketName: databricks-bucket\n roleName: ${prefix}-uc-access\n kmsName: databricks-kms\n thisGetAwsUnityCatalogAssumeRolePolicy:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogAssumeRolePolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n roleName: ${prefix}-uc-access\n externalId: '12345'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", + "description": "\u003e **Note** This resource has an evolving API, which may change in future versions of the provider. 
Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions.\n\nThis data source constructs the necessary AWS Unity Catalog assume role policy for you.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getAwsUnityCatalogPolicy({\n awsAccountId: awsAccountId,\n bucketName: \"databricks-bucket\",\n roleName: `${prefix}-uc-access`,\n kmsName: \"databricks-kms\",\n});\nconst thisGetAwsUnityCatalogAssumeRolePolicy = databricks.getAwsUnityCatalogAssumeRolePolicy({\n awsAccountId: awsAccountId,\n roleName: `${prefix}-uc-access`,\n externalId: \"12345\",\n});\nconst unityMetastore = new aws.iam.Policy(\"unity_metastore\", {\n name: `${prefix}-unity-catalog-metastore-access-iam-policy`,\n policy: _this.then(_this =\u003e _this.json),\n});\nconst metastoreDataAccess = new aws.iam.Role(\"metastore_data_access\", {\n name: `${prefix}-uc-access`,\n assumeRolePolicy: thisAwsIamPolicyDocument.json,\n managedPolicyArns: [unityMetastore.arn],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nthis = databricks.get_aws_unity_catalog_policy(aws_account_id=aws_account_id,\n bucket_name=\"databricks-bucket\",\n role_name=f\"{prefix}-uc-access\",\n kms_name=\"databricks-kms\")\nthis_get_aws_unity_catalog_assume_role_policy = databricks.get_aws_unity_catalog_assume_role_policy(aws_account_id=aws_account_id,\n role_name=f\"{prefix}-uc-access\",\n external_id=\"12345\")\nunity_metastore = aws.iam.Policy(\"unity_metastore\",\n name=f\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n policy=this.json)\nmetastore_data_access = aws.iam.Role(\"metastore_data_access\",\n name=f\"{prefix}-uc-access\",\n 
assume_role_policy=this_aws_iam_policy_document[\"json\"],\n managed_policy_arns=[unity_metastore.arn])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = Databricks.GetAwsUnityCatalogPolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n BucketName = \"databricks-bucket\",\n RoleName = $\"{prefix}-uc-access\",\n KmsName = \"databricks-kms\",\n });\n\n var thisGetAwsUnityCatalogAssumeRolePolicy = Databricks.GetAwsUnityCatalogAssumeRolePolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n RoleName = $\"{prefix}-uc-access\",\n ExternalId = \"12345\",\n });\n\n var unityMetastore = new Aws.Iam.Policy(\"unity_metastore\", new()\n {\n Name = $\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n PolicyDocument = @this.Apply(@this =\u003e @this.Apply(getAwsUnityCatalogPolicyResult =\u003e getAwsUnityCatalogPolicyResult.Json)),\n });\n\n var metastoreDataAccess = new Aws.Iam.Role(\"metastore_data_access\", new()\n {\n Name = $\"{prefix}-uc-access\",\n AssumeRolePolicy = thisAwsIamPolicyDocument.Json,\n ManagedPolicyArns = new[]\n {\n unityMetastore.Arn,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthis, err := databricks.GetAwsUnityCatalogPolicy(ctx, \u0026databricks.GetAwsUnityCatalogPolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tBucketName: \"databricks-bucket\",\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tKmsName: pulumi.StringRef(\"databricks-kms\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.GetAwsUnityCatalogAssumeRolePolicy(ctx, 
\u0026databricks.GetAwsUnityCatalogAssumeRolePolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tExternalId: \"12345\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tunityMetastore, err := iam.NewPolicy(ctx, \"unity_metastore\", \u0026iam.PolicyArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-unity-catalog-metastore-access-iam-policy\", prefix)),\n\t\t\tPolicy: pulumi.String(this.Json),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRole(ctx, \"metastore_data_access\", \u0026iam.RoleArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-uc-access\", prefix)),\n\t\t\tAssumeRolePolicy: pulumi.Any(thisAwsIamPolicyDocument.Json),\n\t\t\tManagedPolicyArns: pulumi.StringArray{\n\t\t\t\tunityMetastore.Arn,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogPolicyArgs;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogAssumeRolePolicyArgs;\nimport com.pulumi.aws.iam.Policy;\nimport com.pulumi.aws.iam.PolicyArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var this = DatabricksFunctions.getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .bucketName(\"databricks-bucket\")\n .roleName(String.format(\"%s-uc-access\", prefix))\n .kmsName(\"databricks-kms\")\n .build());\n\n final var thisGetAwsUnityCatalogAssumeRolePolicy = 
DatabricksFunctions.getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .roleName(String.format(\"%s-uc-access\", prefix))\n .externalId(\"12345\")\n .build());\n\n var unityMetastore = new Policy(\"unityMetastore\", PolicyArgs.builder()\n .name(String.format(\"%s-unity-catalog-metastore-access-iam-policy\", prefix))\n .policy(this_.json())\n .build());\n\n var metastoreDataAccess = new Role(\"metastoreDataAccess\", RoleArgs.builder()\n .name(String.format(\"%s-uc-access\", prefix))\n .assumeRolePolicy(thisAwsIamPolicyDocument.json())\n .managedPolicyArns(unityMetastore.arn())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n unityMetastore:\n type: aws:iam:Policy\n name: unity_metastore\n properties:\n name: ${prefix}-unity-catalog-metastore-access-iam-policy\n policy: ${this.json}\n metastoreDataAccess:\n type: aws:iam:Role\n name: metastore_data_access\n properties:\n name: ${prefix}-uc-access\n assumeRolePolicy: ${thisAwsIamPolicyDocument.json}\n managedPolicyArns:\n - ${unityMetastore.arn}\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogPolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n bucketName: databricks-bucket\n roleName: ${prefix}-uc-access\n kmsName: databricks-kms\n thisGetAwsUnityCatalogAssumeRolePolicy:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogAssumeRolePolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n roleName: ${prefix}-uc-access\n externalId: '12345'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", "inputs": { "description": "A collection of arguments for invoking getAwsUnityCatalogAssumeRolePolicy.\n", "properties": { @@ -26871,7 +27062,7 @@ }, "roleName": { "type": "string", - "description": "The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws).\n", + "description": 
"The name of the AWS IAM role to be created for Unity Catalog.\n", "willReplaceOnChanges": true }, "unityCatalogIamArn": { @@ -26921,7 +27112,7 @@ } }, "databricks:index/getAwsUnityCatalogPolicy:getAwsUnityCatalogPolicy": { - "description": "\u003e **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions.\n\nThis data source constructs necessary AWS Unity Catalog policy for you.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getAwsUnityCatalogPolicy({\n awsAccountId: awsAccountId,\n bucketName: \"databricks-bucket\",\n roleName: `${prefix}-uc-access`,\n kmsName: \"databricks-kms\",\n});\nconst thisGetAwsUnityCatalogAssumeRolePolicy = databricks.getAwsUnityCatalogAssumeRolePolicy({\n awsAccountId: awsAccountId,\n roleName: `${prefix}-uc-access`,\n externalId: \"12345\",\n});\nconst unityMetastore = new aws.iam.Policy(\"unity_metastore\", {\n name: `${prefix}-unity-catalog-metastore-access-iam-policy`,\n policy: _this.then(_this =\u003e _this.json),\n});\nconst metastoreDataAccess = new aws.iam.Role(\"metastore_data_access\", {\n name: `${prefix}-uc-access`,\n assumeRolePolicy: passroleForUc.json,\n managedPolicyArns: [unityMetastore.arn],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nthis = databricks.get_aws_unity_catalog_policy(aws_account_id=aws_account_id,\n bucket_name=\"databricks-bucket\",\n role_name=f\"{prefix}-uc-access\",\n kms_name=\"databricks-kms\")\nthis_get_aws_unity_catalog_assume_role_policy = 
databricks.get_aws_unity_catalog_assume_role_policy(aws_account_id=aws_account_id,\n role_name=f\"{prefix}-uc-access\",\n external_id=\"12345\")\nunity_metastore = aws.iam.Policy(\"unity_metastore\",\n name=f\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n policy=this.json)\nmetastore_data_access = aws.iam.Role(\"metastore_data_access\",\n name=f\"{prefix}-uc-access\",\n assume_role_policy=passrole_for_uc[\"json\"],\n managed_policy_arns=[unity_metastore.arn])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = Databricks.GetAwsUnityCatalogPolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n BucketName = \"databricks-bucket\",\n RoleName = $\"{prefix}-uc-access\",\n KmsName = \"databricks-kms\",\n });\n\n var thisGetAwsUnityCatalogAssumeRolePolicy = Databricks.GetAwsUnityCatalogAssumeRolePolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n RoleName = $\"{prefix}-uc-access\",\n ExternalId = \"12345\",\n });\n\n var unityMetastore = new Aws.Iam.Policy(\"unity_metastore\", new()\n {\n Name = $\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n PolicyDocument = @this.Apply(@this =\u003e @this.Apply(getAwsUnityCatalogPolicyResult =\u003e getAwsUnityCatalogPolicyResult.Json)),\n });\n\n var metastoreDataAccess = new Aws.Iam.Role(\"metastore_data_access\", new()\n {\n Name = $\"{prefix}-uc-access\",\n AssumeRolePolicy = passroleForUc.Json,\n ManagedPolicyArns = new[]\n {\n unityMetastore.Arn,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthis, err := databricks.GetAwsUnityCatalogPolicy(ctx, 
\u0026databricks.GetAwsUnityCatalogPolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tBucketName: \"databricks-bucket\",\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tKmsName: pulumi.StringRef(\"databricks-kms\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.GetAwsUnityCatalogAssumeRolePolicy(ctx, \u0026databricks.GetAwsUnityCatalogAssumeRolePolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tExternalId: \"12345\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tunityMetastore, err := iam.NewPolicy(ctx, \"unity_metastore\", \u0026iam.PolicyArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-unity-catalog-metastore-access-iam-policy\", prefix)),\n\t\t\tPolicy: pulumi.String(this.Json),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRole(ctx, \"metastore_data_access\", \u0026iam.RoleArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-uc-access\", prefix)),\n\t\t\tAssumeRolePolicy: pulumi.Any(passroleForUc.Json),\n\t\t\tManagedPolicyArns: pulumi.StringArray{\n\t\t\t\tunityMetastore.Arn,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogPolicyArgs;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogAssumeRolePolicyArgs;\nimport com.pulumi.aws.iam.Policy;\nimport com.pulumi.aws.iam.PolicyArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void 
stack(Context ctx) {\n final var this = DatabricksFunctions.getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .bucketName(\"databricks-bucket\")\n .roleName(String.format(\"%s-uc-access\", prefix))\n .kmsName(\"databricks-kms\")\n .build());\n\n final var thisGetAwsUnityCatalogAssumeRolePolicy = DatabricksFunctions.getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .roleName(String.format(\"%s-uc-access\", prefix))\n .externalId(\"12345\")\n .build());\n\n var unityMetastore = new Policy(\"unityMetastore\", PolicyArgs.builder()\n .name(String.format(\"%s-unity-catalog-metastore-access-iam-policy\", prefix))\n .policy(this_.json())\n .build());\n\n var metastoreDataAccess = new Role(\"metastoreDataAccess\", RoleArgs.builder()\n .name(String.format(\"%s-uc-access\", prefix))\n .assumeRolePolicy(passroleForUc.json())\n .managedPolicyArns(unityMetastore.arn())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n unityMetastore:\n type: aws:iam:Policy\n name: unity_metastore\n properties:\n name: ${prefix}-unity-catalog-metastore-access-iam-policy\n policy: ${this.json}\n metastoreDataAccess:\n type: aws:iam:Role\n name: metastore_data_access\n properties:\n name: ${prefix}-uc-access\n assumeRolePolicy: ${passroleForUc.json}\n managedPolicyArns:\n - ${unityMetastore.arn}\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogPolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n bucketName: databricks-bucket\n roleName: ${prefix}-uc-access\n kmsName: databricks-kms\n thisGetAwsUnityCatalogAssumeRolePolicy:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogAssumeRolePolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n roleName: ${prefix}-uc-access\n externalId: '12345'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", + "description": "\u003e **Note** This resource has an evolving API, which may change in future versions of the provider. 
Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions.\n\nThis data source constructs the necessary AWS Unity Catalog policy for you.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getAwsUnityCatalogPolicy({\n awsAccountId: awsAccountId,\n bucketName: \"databricks-bucket\",\n roleName: `${prefix}-uc-access`,\n kmsName: \"databricks-kms\",\n});\nconst thisGetAwsUnityCatalogAssumeRolePolicy = databricks.getAwsUnityCatalogAssumeRolePolicy({\n awsAccountId: awsAccountId,\n roleName: `${prefix}-uc-access`,\n externalId: \"12345\",\n});\nconst unityMetastore = new aws.iam.Policy(\"unity_metastore\", {\n name: `${prefix}-unity-catalog-metastore-access-iam-policy`,\n policy: _this.then(_this =\u003e _this.json),\n});\nconst metastoreDataAccess = new aws.iam.Role(\"metastore_data_access\", {\n name: `${prefix}-uc-access`,\n assumeRolePolicy: thisAwsIamPolicyDocument.json,\n managedPolicyArns: [unityMetastore.arn],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nthis = databricks.get_aws_unity_catalog_policy(aws_account_id=aws_account_id,\n bucket_name=\"databricks-bucket\",\n role_name=f\"{prefix}-uc-access\",\n kms_name=\"databricks-kms\")\nthis_get_aws_unity_catalog_assume_role_policy = databricks.get_aws_unity_catalog_assume_role_policy(aws_account_id=aws_account_id,\n role_name=f\"{prefix}-uc-access\",\n external_id=\"12345\")\nunity_metastore = aws.iam.Policy(\"unity_metastore\",\n name=f\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n policy=this.json)\nmetastore_data_access = aws.iam.Role(\"metastore_data_access\",\n name=f\"{prefix}-uc-access\",\n 
assume_role_policy=this_aws_iam_policy_document[\"json\"],\n managed_policy_arns=[unity_metastore.arn])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = Databricks.GetAwsUnityCatalogPolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n BucketName = \"databricks-bucket\",\n RoleName = $\"{prefix}-uc-access\",\n KmsName = \"databricks-kms\",\n });\n\n var thisGetAwsUnityCatalogAssumeRolePolicy = Databricks.GetAwsUnityCatalogAssumeRolePolicy.Invoke(new()\n {\n AwsAccountId = awsAccountId,\n RoleName = $\"{prefix}-uc-access\",\n ExternalId = \"12345\",\n });\n\n var unityMetastore = new Aws.Iam.Policy(\"unity_metastore\", new()\n {\n Name = $\"{prefix}-unity-catalog-metastore-access-iam-policy\",\n PolicyDocument = @this.Apply(@this =\u003e @this.Apply(getAwsUnityCatalogPolicyResult =\u003e getAwsUnityCatalogPolicyResult.Json)),\n });\n\n var metastoreDataAccess = new Aws.Iam.Role(\"metastore_data_access\", new()\n {\n Name = $\"{prefix}-uc-access\",\n AssumeRolePolicy = thisAwsIamPolicyDocument.Json,\n ManagedPolicyArns = new[]\n {\n unityMetastore.Arn,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthis, err := databricks.GetAwsUnityCatalogPolicy(ctx, \u0026databricks.GetAwsUnityCatalogPolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tBucketName: \"databricks-bucket\",\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tKmsName: pulumi.StringRef(\"databricks-kms\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.GetAwsUnityCatalogAssumeRolePolicy(ctx, 
\u0026databricks.GetAwsUnityCatalogAssumeRolePolicyArgs{\n\t\t\tAwsAccountId: awsAccountId,\n\t\t\tRoleName: fmt.Sprintf(\"%v-uc-access\", prefix),\n\t\t\tExternalId: \"12345\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tunityMetastore, err := iam.NewPolicy(ctx, \"unity_metastore\", \u0026iam.PolicyArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-unity-catalog-metastore-access-iam-policy\", prefix)),\n\t\t\tPolicy: pulumi.String(this.Json),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewRole(ctx, \"metastore_data_access\", \u0026iam.RoleArgs{\n\t\t\tName: pulumi.String(fmt.Sprintf(\"%v-uc-access\", prefix)),\n\t\t\tAssumeRolePolicy: pulumi.Any(thisAwsIamPolicyDocument.Json),\n\t\t\tManagedPolicyArns: pulumi.StringArray{\n\t\t\t\tunityMetastore.Arn,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogPolicyArgs;\nimport com.pulumi.databricks.inputs.GetAwsUnityCatalogAssumeRolePolicyArgs;\nimport com.pulumi.aws.iam.Policy;\nimport com.pulumi.aws.iam.PolicyArgs;\nimport com.pulumi.aws.iam.Role;\nimport com.pulumi.aws.iam.RoleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var this = DatabricksFunctions.getAwsUnityCatalogPolicy(GetAwsUnityCatalogPolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .bucketName(\"databricks-bucket\")\n .roleName(String.format(\"%s-uc-access\", prefix))\n .kmsName(\"databricks-kms\")\n .build());\n\n final var thisGetAwsUnityCatalogAssumeRolePolicy = 
DatabricksFunctions.getAwsUnityCatalogAssumeRolePolicy(GetAwsUnityCatalogAssumeRolePolicyArgs.builder()\n .awsAccountId(awsAccountId)\n .roleName(String.format(\"%s-uc-access\", prefix))\n .externalId(\"12345\")\n .build());\n\n var unityMetastore = new Policy(\"unityMetastore\", PolicyArgs.builder()\n .name(String.format(\"%s-unity-catalog-metastore-access-iam-policy\", prefix))\n .policy(this_.json())\n .build());\n\n var metastoreDataAccess = new Role(\"metastoreDataAccess\", RoleArgs.builder()\n .name(String.format(\"%s-uc-access\", prefix))\n .assumeRolePolicy(thisAwsIamPolicyDocument.json())\n .managedPolicyArns(unityMetastore.arn())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n unityMetastore:\n type: aws:iam:Policy\n name: unity_metastore\n properties:\n name: ${prefix}-unity-catalog-metastore-access-iam-policy\n policy: ${this.json}\n metastoreDataAccess:\n type: aws:iam:Role\n name: metastore_data_access\n properties:\n name: ${prefix}-uc-access\n assumeRolePolicy: ${thisAwsIamPolicyDocument.json}\n managedPolicyArns:\n - ${unityMetastore.arn}\nvariables:\n this:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogPolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n bucketName: databricks-bucket\n roleName: ${prefix}-uc-access\n kmsName: databricks-kms\n thisGetAwsUnityCatalogAssumeRolePolicy:\n fn::invoke:\n Function: databricks:getAwsUnityCatalogAssumeRolePolicy\n Arguments:\n awsAccountId: ${awsAccountId}\n roleName: ${prefix}-uc-access\n externalId: '12345'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", "inputs": { "description": "A collection of arguments for invoking getAwsUnityCatalogPolicy.\n", "properties": { @@ -28272,7 +28463,7 @@ } }, "databricks:index/getMwsCredentials:getMwsCredentials": { - "description": "\u003e **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ 
errors.\n\nLists all databricks.MwsCredentials in Databricks Account.\n\n\u003e **Note** `account_id` provider configuration property is required for this resource to work.\n\n## Example Usage\n\nListing all credentials in Databricks Account\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst all = databricks.getMwsCredentials({});\nexport const allMwsCredentials = all.then(all =\u003e all.ids);\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nall = databricks.get_mws_credentials()\npulumi.export(\"allMwsCredentials\", all.ids)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var all = Databricks.GetMwsCredentials.Invoke();\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"allMwsCredentials\"] = all.Apply(getMwsCredentialsResult =\u003e getMwsCredentialsResult.Ids),\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tall, err := databricks.LookupMwsCredentials(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"allMwsCredentials\", all.Ids)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetMwsCredentialsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context 
ctx) {\n final var all = DatabricksFunctions.getMwsCredentials();\n\n ctx.export(\"allMwsCredentials\", all.applyValue(getMwsCredentialsResult -\u003e getMwsCredentialsResult.ids()));\n }\n}\n```\n```yaml\nvariables:\n all:\n fn::invoke:\n Function: databricks:getMwsCredentials\n Arguments: {}\noutputs:\n allMwsCredentials: ${all.ids}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n", + "description": "\u003e **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.\n\nLists all databricks.MwsCredentials in Databricks Account.\n\n\u003e **Note** `account_id` provider configuration property is required for this resource to work.\n\n## Example Usage\n\nListing all credentials in Databricks Account\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst all = 
databricks.getMwsCredentials({});\nexport const allMwsCredentials = all.then(all =\u003e all.ids);\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nall = databricks.get_mws_credentials()\npulumi.export(\"allMwsCredentials\", all.ids)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var all = Databricks.GetMwsCredentials.Invoke();\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"allMwsCredentials\"] = all.Apply(getMwsCredentialsResult =\u003e getMwsCredentialsResult.Ids),\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tall, err := databricks.LookupMwsCredentials(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"allMwsCredentials\", all.Ids)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetMwsCredentialsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var all = DatabricksFunctions.getMwsCredentials();\n\n ctx.export(\"allMwsCredentials\", all.applyValue(getMwsCredentialsResult -\u003e getMwsCredentialsResult.ids()));\n }\n}\n```\n```yaml\nvariables:\n all:\n fn::invoke:\n Function: databricks:getMwsCredentials\n Arguments: {}\noutputs:\n allMwsCredentials: ${all.ids}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe 
following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n", "inputs": { "description": "A collection of arguments for invoking getMwsCredentials.\n", "properties": { @@ -28309,7 +28500,7 @@ } }, "databricks:index/getMwsWorkspaces:getMwsWorkspaces": { - "description": "\u003e **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.\n\nLists all databricks.MwsWorkspaces in Databricks Account.\n\n\u003e **Note** `account_id` provider configuration property is required for this resource to work.\n\n## Example Usage\n\nListing all workspaces in\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst all = databricks.getMwsWorkspaces({});\nexport const allMwsWorkspaces = all.then(all =\u003e all.ids);\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nall = databricks.get_mws_workspaces()\npulumi.export(\"allMwsWorkspaces\", all.ids)\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var all = Databricks.GetMwsWorkspaces.Invoke();\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"allMwsWorkspaces\"] = all.Apply(getMwsWorkspacesResult =\u003e getMwsWorkspacesResult.Ids),\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tall, err := databricks.LookupMwsWorkspaces(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"allMwsWorkspaces\", all.Ids)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetMwsWorkspacesArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var all = DatabricksFunctions.getMwsWorkspaces();\n\n ctx.export(\"allMwsWorkspaces\", all.applyValue(getMwsWorkspacesResult -\u003e getMwsWorkspacesResult.ids()));\n }\n}\n```\n```yaml\nvariables:\n all:\n fn::invoke:\n Function: databricks:getMwsWorkspaces\n Arguments: {}\noutputs:\n allMwsWorkspaces: ${all.ids}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.MwsWorkspaces to manage Databricks E2 Workspaces.\n* databricks.MetastoreAssignment\n", + "description": "\u003e **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to 
add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors.\n\nLists all databricks.MwsWorkspaces in Databricks Account.\n\n\u003e **Note** `account_id` provider configuration property is required for this resource to work.\n\n## Example Usage\n\nListing all workspaces in\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst all = databricks.getMwsWorkspaces({});\nexport const allMwsWorkspaces = all.then(all =\u003e all.ids);\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nall = databricks.get_mws_workspaces()\npulumi.export(\"allMwsWorkspaces\", all.ids)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var all = Databricks.GetMwsWorkspaces.Invoke();\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"allMwsWorkspaces\"] = all.Apply(getMwsWorkspacesResult =\u003e getMwsWorkspacesResult.Ids),\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tall, err := databricks.LookupMwsWorkspaces(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"allMwsWorkspaces\", all.Ids)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetMwsWorkspacesArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n 
Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var all = DatabricksFunctions.getMwsWorkspaces();\n\n ctx.export(\"allMwsWorkspaces\", all.applyValue(getMwsWorkspacesResult -\u003e getMwsWorkspacesResult.ids()));\n }\n}\n```\n```yaml\nvariables:\n all:\n fn::invoke:\n Function: databricks:getMwsWorkspaces\n Arguments: {}\noutputs:\n allMwsWorkspaces: ${all.ids}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP.\n* databricks.MetastoreAssignment\n", "inputs": { "description": "A collection of arguments for invoking getMwsWorkspaces.\n", "properties": { diff --git a/provider/go.mod b/provider/go.mod index aad5bd8d..6e708b23 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -1,24 +1,27 @@ module github.com/pulumi/pulumi-databricks/provider -go 1.21 +go 1.22 + +toolchain go1.22.4 replace github.com/hashicorp/terraform-plugin-sdk/v2 => github.com/pulumi/terraform-plugin-sdk/v2 v2.0.0-20240520223432-0c0bf0d65f10 require ( - github.com/databricks/databricks-sdk-go v0.41.0 - github.com/databricks/terraform-provider-databricks v1.47.0 + github.com/databricks/databricks-sdk-go v0.43.0 + github.com/databricks/terraform-provider-databricks v1.48.0 github.com/pulumi/pulumi-terraform-bridge/v3 v3.85.0 ) require ( - cloud.google.com/go v0.112.1 // indirect - cloud.google.com/go/compute v1.25.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.6 // indirect - cloud.google.com/go/kms v1.15.7 // indirect + cloud.google.com/go v0.114.0 // indirect + cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go/kms v1.15.8 // indirect cloud.google.com/go/logging v1.9.0 // indirect - 
cloud.google.com/go/longrunning v0.5.5 // indirect - cloud.google.com/go/storage v1.39.1 // indirect + cloud.google.com/go/longrunning v0.5.6 // indirect + cloud.google.com/go/storage v1.40.0 // indirect dario.cat/mergo v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect @@ -94,7 +97,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/google/wire v0.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -221,7 +224,7 @@ require ( golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/oauth2 v0.20.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect @@ -229,13 +232,13 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/api v0.169.0 // indirect + google.golang.org/api v0.182.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 // indirect - google.golang.org/grpc v1.63.2 // indirect - google.golang.org/protobuf v1.34.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/provider/go.sum b/provider/go.sum index 276b980a..b84b430e 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -43,8 +43,8 @@ cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5x cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -161,6 +161,10 @@ cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= +cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= +cloud.google.com/go/auth v0.4.2/go.mod 
h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -304,13 +308,12 @@ cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdi cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU= -cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights 
v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -593,8 +596,8 @@ cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+K cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -634,8 +637,8 @@ cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUE cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc= cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM= -cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI= +cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs= +cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -667,8 
+670,8 @@ cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHS cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= -cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/longrunning v0.5.6 h1:xAe8+0YaWoCKr9t1+aWe+OeQgN/iJK1fEgZSXmjuEaE= +cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -1014,8 +1017,8 @@ cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY= -cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= 
cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -1331,10 +1334,10 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.41.0 h1:OyhYY+Q6+gqkWeXmpGEiacoU2RStTeWPF0x4vmqbQdc= -github.com/databricks/databricks-sdk-go v0.41.0/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo= -github.com/databricks/terraform-provider-databricks v1.47.0 h1:9Vww52FWuhJ8YNB8vz7PdQEjFRznt/cciGmHndcYwLc= -github.com/databricks/terraform-provider-databricks v1.47.0/go.mod h1:7yWyWm4ZA7f1x2Lm50DqCWXBaYi6N/M1KRysMQWgO3k= +github.com/databricks/databricks-sdk-go v0.43.0 h1:x4laolWhYlsQg2t8yWEGyRPZy4/Wv3pKnLEoJfVin7I= +github.com/databricks/databricks-sdk-go v0.43.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8= +github.com/databricks/terraform-provider-databricks v1.48.0 h1:SsJ0cTvaipwiuySB60SLkN6mH71sfLo5D4FWFvItoh8= +github.com/databricks/terraform-provider-databricks v1.48.0/go.mod h1:LtCv5f2Vfoyv9aBy3CY1PYgRovuE/tDoYt64hdS4Uts= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1516,8 +1519,9 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/martian/v3 
v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -1576,8 +1580,8 @@ github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38 github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= @@ -2077,8 +2081,8 @@ go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/metric v1.24.0 
h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= -go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -2307,8 +2311,8 @@ golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQ golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2655,8 +2659,8 @@ google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvy google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/api v0.139.0/go.mod 
h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= -google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= +google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2818,8 +2822,8 @@ google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod 
h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -2837,8 +2841,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go. google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= @@ -2859,8 +2863,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 h1:8EeVk1VKMD+GD/neyEHGmz7pFblqPjHoi+PGQIlLx2s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2911,8 +2915,8 @@ google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSs google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= 
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2934,8 +2938,8 @@ google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= -google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/sdk/dotnet/Config/Config.cs b/sdk/dotnet/Config/Config.cs index fb6083a6..b8f3cc5b 100644 --- a/sdk/dotnet/Config/Config.cs +++ b/sdk/dotnet/Config/Config.cs @@ -207,6 +207,13 @@ public static int? RetryTimeoutSeconds set => _retryTimeoutSeconds.Set(value); } + private static readonly __Value _serverlessComputeId = new __Value(() => __config.Get("serverlessComputeId")); + public static string? ServerlessComputeId + { + get => _serverlessComputeId.Get(); + set => _serverlessComputeId.Set(value); + } + private static readonly __Value _skipVerify = new __Value(() => __config.GetBoolean("skipVerify")); public static bool? 
SkipVerify { diff --git a/sdk/dotnet/GetAwsAssumeRolePolicy.cs b/sdk/dotnet/GetAwsAssumeRolePolicy.cs index b6e6ad64..4b4e1b5a 100644 --- a/sdk/dotnet/GetAwsAssumeRolePolicy.cs +++ b/sdk/dotnet/GetAwsAssumeRolePolicy.cs @@ -71,7 +71,7 @@ public static class GetAwsAssumeRolePolicy /// /// The following resources are used in the same context: /// - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide /// * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. /// * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). /// @@ -138,7 +138,7 @@ public static Task InvokeAsync(GetAwsAssumeRolePol /// /// The following resources are used in the same context: /// - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide /// * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. /// * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). 
/// diff --git a/sdk/dotnet/GetAwsBucketPolicy.cs b/sdk/dotnet/GetAwsBucketPolicy.cs index 4dff86a0..6730e24a 100644 --- a/sdk/dotnet/GetAwsBucketPolicy.cs +++ b/sdk/dotnet/GetAwsBucketPolicy.cs @@ -37,7 +37,7 @@ public sealed class GetAwsBucketPolicyArgs : global::Pulumi.InvokeArgs public string? DatabricksAccountId { get; set; } /// - /// Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + /// Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket /// [Input("databricksE2AccountId")] public string? DatabricksE2AccountId { get; set; } @@ -66,7 +66,7 @@ public sealed class GetAwsBucketPolicyInvokeArgs : global::Pulumi.InvokeArgs public Input? DatabricksAccountId { get; set; } /// - /// Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + /// Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket /// [Input("databricksE2AccountId")] public Input? DatabricksE2AccountId { get; set; } diff --git a/sdk/dotnet/GetAwsCrossAccountPolicy.cs b/sdk/dotnet/GetAwsCrossAccountPolicy.cs index 57fe66a5..fb83eeb5 100644 --- a/sdk/dotnet/GetAwsCrossAccountPolicy.cs +++ b/sdk/dotnet/GetAwsCrossAccountPolicy.cs @@ -37,7 +37,7 @@ public static class GetAwsCrossAccountPolicy /// /// The following resources are used in the same context: /// - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide /// * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. /// * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. 
/// * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. @@ -71,7 +71,7 @@ public static Task InvokeAsync(GetAwsCrossAccoun /// /// The following resources are used in the same context: /// - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide /// * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. /// * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. /// * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. diff --git a/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs b/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs index c6a91677..6748c200 100644 --- a/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs +++ b/sdk/dotnet/GetAwsUnityCatalogAssumeRolePolicy.cs @@ -14,7 +14,7 @@ public static class GetAwsUnityCatalogAssumeRolePolicy /// /// > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. /// - /// This data source constructs necessary AWS Unity Catalog assume role policy for you. + /// This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
/// /// ## Example Usage /// @@ -51,7 +51,7 @@ public static class GetAwsUnityCatalogAssumeRolePolicy /// var metastoreDataAccess = new Aws.Iam.Role("metastore_data_access", new() /// { /// Name = $"{prefix}-uc-access", - /// AssumeRolePolicy = passroleForUc.Json, + /// AssumeRolePolicy = thisAwsIamPolicyDocument.Json, /// ManagedPolicyArns = new[] /// { /// unityMetastore.Arn, @@ -67,7 +67,7 @@ public static Task InvokeAsync(GetAwsU /// /// > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. /// - /// This data source constructs necessary AWS Unity Catalog assume role policy for you. + /// This data source constructs the necessary AWS Unity Catalog assume role policy for you. /// /// ## Example Usage /// @@ -104,7 +104,7 @@ public static Task InvokeAsync(GetAwsU /// var metastoreDataAccess = new Aws.Iam.Role("metastore_data_access", new() /// { /// Name = $"{prefix}-uc-access", - /// AssumeRolePolicy = passroleForUc.Json, + /// AssumeRolePolicy = thisAwsIamPolicyDocument.Json, /// ManagedPolicyArns = new[] /// { /// unityMetastore.Arn, @@ -134,7 +134,7 @@ public sealed class GetAwsUnityCatalogAssumeRolePolicyArgs : global::Pulumi.Invo public string ExternalId { get; set; } = null!; /// - /// The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + /// The name of the AWS IAM role to be created for Unity Catalog. 
/// [Input("roleName", required: true)] public string RoleName { get; set; } = null!; @@ -166,7 +166,7 @@ public sealed class GetAwsUnityCatalogAssumeRolePolicyInvokeArgs : global::Pulum public Input ExternalId { get; set; } = null!; /// - /// The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + /// The name of the AWS IAM role to be created for Unity Catalog. /// [Input("roleName", required: true)] public Input RoleName { get; set; } = null!; diff --git a/sdk/dotnet/GetAwsUnityCatalogPolicy.cs b/sdk/dotnet/GetAwsUnityCatalogPolicy.cs index ec7f5dae..6c1cab8d 100644 --- a/sdk/dotnet/GetAwsUnityCatalogPolicy.cs +++ b/sdk/dotnet/GetAwsUnityCatalogPolicy.cs @@ -14,7 +14,7 @@ public static class GetAwsUnityCatalogPolicy /// /// > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. /// - /// This data source constructs necessary AWS Unity Catalog policy for you. + /// This data source constructs the necessary AWS Unity Catalog policy for you. /// /// ## Example Usage /// @@ -51,7 +51,7 @@ public static class GetAwsUnityCatalogPolicy /// var metastoreDataAccess = new Aws.Iam.Role("metastore_data_access", new() /// { /// Name = $"{prefix}-uc-access", - /// AssumeRolePolicy = passroleForUc.Json, + /// AssumeRolePolicy = thisAwsIamPolicyDocument.Json, /// ManagedPolicyArns = new[] /// { /// unityMetastore.Arn, @@ -67,7 +67,7 @@ public static Task InvokeAsync(GetAwsUnityCatalo /// /// > **Note** This resource has an evolving API, which may change in future versions of the provider. 
Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. /// - /// This data source constructs necessary AWS Unity Catalog policy for you. + /// This data source constructs the necessary AWS Unity Catalog policy for you. /// /// ## Example Usage /// @@ -104,7 +104,7 @@ public static Task InvokeAsync(GetAwsUnityCatalo /// var metastoreDataAccess = new Aws.Iam.Role("metastore_data_access", new() /// { /// Name = $"{prefix}-uc-access", - /// AssumeRolePolicy = passroleForUc.Json, + /// AssumeRolePolicy = thisAwsIamPolicyDocument.Json, /// ManagedPolicyArns = new[] /// { /// unityMetastore.Arn, diff --git a/sdk/dotnet/GetMwsCredentials.cs b/sdk/dotnet/GetMwsCredentials.cs index d0dd398b..aa9b4732 100644 --- a/sdk/dotnet/GetMwsCredentials.cs +++ b/sdk/dotnet/GetMwsCredentials.cs @@ -48,7 +48,7 @@ public static class GetMwsCredentials /// * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. /// * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// public static Task InvokeAsync(GetMwsCredentialsArgs? args = null, InvokeOptions? 
options = null) => global::Pulumi.Deployment.Instance.InvokeAsync("databricks:index/getMwsCredentials:getMwsCredentials", args ?? new GetMwsCredentialsArgs(), options.WithDefaults()); @@ -90,7 +90,7 @@ public static Task InvokeAsync(GetMwsCredentialsArgs? a /// * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. /// * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// public static Output Invoke(GetMwsCredentialsInvokeArgs? args = null, InvokeOptions? options = null) => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getMwsCredentials:getMwsCredentials", args ?? new GetMwsCredentialsInvokeArgs(), options.WithDefaults()); diff --git a/sdk/dotnet/GetMwsWorkspaces.cs b/sdk/dotnet/GetMwsWorkspaces.cs index 659dce61..b817ec07 100644 --- a/sdk/dotnet/GetMwsWorkspaces.cs +++ b/sdk/dotnet/GetMwsWorkspaces.cs @@ -43,7 +43,7 @@ public static class GetMwsWorkspaces /// /// The following resources are used in the same context: /// - /// * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + /// * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. /// * databricks.MetastoreAssignment /// public static Task InvokeAsync(GetMwsWorkspacesArgs? args = null, InvokeOptions? 
options = null) @@ -81,7 +81,7 @@ public static Task InvokeAsync(GetMwsWorkspacesArgs? arg /// /// The following resources are used in the same context: /// - /// * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + /// * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. /// * databricks.MetastoreAssignment /// public static Output Invoke(GetMwsWorkspacesInvokeArgs? args = null, InvokeOptions? options = null) diff --git a/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfo.cs b/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfo.cs index 7dfdc9f9..aa44049b 100644 --- a/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfo.cs +++ b/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfo.cs @@ -57,6 +57,9 @@ public sealed class GetExternalLocationExternalLocationInfoArgs : global::Pulumi [Input("encryptionDetails")] public Inputs.GetExternalLocationExternalLocationInfoEncryptionDetailsArgs? EncryptionDetails { get; set; } + [Input("isolationMode")] + public string? IsolationMode { get; set; } + /// /// Unique identifier of the parent Metastore. /// diff --git a/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfoArgs.cs b/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfoArgs.cs index b2ccd33c..4499c4c0 100644 --- a/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfoArgs.cs +++ b/sdk/dotnet/Inputs/GetExternalLocationExternalLocationInfoArgs.cs @@ -57,6 +57,9 @@ public sealed class GetExternalLocationExternalLocationInfoInputArgs : global::P [Input("encryptionDetails")] public Input? EncryptionDetails { get; set; } + [Input("isolationMode")] + public Input? IsolationMode { get; set; } + /// /// Unique identifier of the parent Metastore. 
/// diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotifications.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotifications.cs index ed5c39c3..d11866e9 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotifications.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotifications.cs @@ -39,6 +39,14 @@ public List OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private List? _onStreamingBacklogExceededs; + public List OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new List()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private List? _onSuccesses; public List OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.cs index d70d574f..27172f34 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.cs @@ -39,6 +39,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; public InputList OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotifications.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotifications.cs index c339e1f0..550dc7cb 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotifications.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotifications.cs @@ -39,6 +39,14 @@ public List OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private List? 
_onStreamingBacklogExceededs; + public List OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new List()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private List? _onSuccesses; public List OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.cs index cfe86e94..c9bfc902 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.cs @@ -39,6 +39,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; public InputList OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.cs index 0c44759c..86f7d281 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.cs @@ -39,6 +39,14 @@ public List OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private List? _onStreamingBacklogExceededs; + public List OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new List()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private List? 
_onSuccesses; public List OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.cs index 464d68f4..00e7bed4 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.cs @@ -39,6 +39,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; public InputList OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.cs index e319dd53..7c787a51 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.cs @@ -36,6 +36,14 @@ public List _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private List? _onStreamingBacklogExceededs; + public List OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new List()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private List? 
_onSuccesses; public List OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.cs index 3988fed1..1b23eeb0 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.cs @@ -36,6 +36,14 @@ public InputList _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; public InputList OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs new file mode 100644 index 00000000..51bc7668 --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs : global::Pulumi.InvokeArgs + { + /// + /// the id of databricks.Job if the resource was matched by name. 
+ /// + [Input("id", required: true)] + public string Id { get; set; } = null!; + + public GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs() + { + } + public static new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty => new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs new file mode 100644 index 00000000..cd77be1a --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs : global::Pulumi.ResourceArgs + { + /// + /// the id of databricks.Job if the resource was matched by name. 
+ /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs() + { + } + public static new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs Empty => new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.cs index 20c3c02c..b8f19694 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.cs @@ -36,6 +36,14 @@ public List set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private List? _onStreamingBacklogExceededs; + public List OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new List()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private List? _onSuccesses; public List OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.cs index 720ad04a..9f1b775b 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.cs @@ -36,6 +36,14 @@ public InputList _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? 
_onSuccesses; public InputList OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.cs new file mode 100644 index 00000000..5a6f1c2f --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs : global::Pulumi.InvokeArgs + { + /// + /// the id of databricks.Job if the resource was matched by name. + /// + [Input("id", required: true)] + public string Id { get; set; } = null!; + + public GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs() + { + } + public static new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty => new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs new file mode 100644 index 00000000..18430841 --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs : global::Pulumi.ResourceArgs + { + /// + /// the id of databricks.Job if the resource was matched by name. + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs() + { + } + public static new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs Empty => new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotifications.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotifications.cs index 712f6bba..03b38d27 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotifications.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotifications.cs @@ -36,6 +36,14 @@ public List OnS set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private List? _onStreamingBacklogExceededs; + public List OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new List()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private List? 
_onSuccesses; public List OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.cs index 683e6f86..2a986079 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.cs @@ -36,6 +36,14 @@ public InputList _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; public InputList OnSuccesses diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.cs new file mode 100644 index 00000000..9732ef47 --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs : global::Pulumi.InvokeArgs + { + /// + /// the id of databricks.Job if the resource was matched by name. 
+ /// + [Input("id", required: true)] + public string Id { get; set; } = null!; + + public GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs() + { + } + public static new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs Empty => new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.cs new file mode 100644 index 00000000..5f4451ab --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInputArgs : global::Pulumi.ResourceArgs + { + /// + /// the id of databricks.Job if the resource was matched by name. 
+ /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInputArgs() + { + } + public static new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInputArgs Empty => new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfo.cs b/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfo.cs index 10d5f828..02325b75 100644 --- a/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfo.cs +++ b/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfo.cs @@ -60,6 +60,9 @@ public sealed class GetStorageCredentialStorageCredentialInfoArgs : global::Pulu [Input("id")] public string? Id { get; set; } + [Input("isolationMode")] + public string? IsolationMode { get; set; } + /// /// Unique identifier of the parent Metastore. /// diff --git a/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfoArgs.cs b/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfoArgs.cs index 76c31929..aca1fd84 100644 --- a/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfoArgs.cs +++ b/sdk/dotnet/Inputs/GetStorageCredentialStorageCredentialInfoArgs.cs @@ -60,6 +60,9 @@ public sealed class GetStorageCredentialStorageCredentialInfoInputArgs : global: [Input("id")] public Input? Id { get; set; } + [Input("isolationMode")] + public Input? IsolationMode { get; set; } + /// /// Unique identifier of the parent Metastore. /// diff --git a/sdk/dotnet/Inputs/JobEmailNotificationsArgs.cs b/sdk/dotnet/Inputs/JobEmailNotificationsArgs.cs index b27863f8..87f329ef 100644 --- a/sdk/dotnet/Inputs/JobEmailNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/JobEmailNotificationsArgs.cs @@ -56,6 +56,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? 
_onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobEmailNotificationsGetArgs.cs b/sdk/dotnet/Inputs/JobEmailNotificationsGetArgs.cs index 41f8f279..49a4763e 100644 --- a/sdk/dotnet/Inputs/JobEmailNotificationsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobEmailNotificationsGetArgs.cs @@ -56,6 +56,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskEmailNotificationsArgs.cs b/sdk/dotnet/Inputs/JobTaskEmailNotificationsArgs.cs index a53400fa..0ec9e681 100644 --- a/sdk/dotnet/Inputs/JobTaskEmailNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskEmailNotificationsArgs.cs @@ -56,6 +56,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? 
_onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskEmailNotificationsGetArgs.cs b/sdk/dotnet/Inputs/JobTaskEmailNotificationsGetArgs.cs index 49cc5f09..c088779b 100644 --- a/sdk/dotnet/Inputs/JobTaskEmailNotificationsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskEmailNotificationsGetArgs.cs @@ -56,6 +56,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.cs b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.cs index 2c42efa9..4d43c983 100644 --- a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.cs @@ -56,6 +56,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsGetArgs.cs b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsGetArgs.cs index 95803704..a5ef97fb 100644 --- a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskEmailNotificationsGetArgs.cs @@ -56,6 +56,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? 
_onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.cs b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.cs index 0b7a0de6..e6fbeb03 100644 --- a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.cs @@ -52,6 +52,14 @@ public InputList O set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsGetArgs.cs b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsGetArgs.cs index 5b743c53..4790549c 100644 --- a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsGetArgs.cs @@ -52,6 +52,14 @@ public InputList _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? 
_onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs new file mode 100644 index 00000000..81c38eac --- /dev/null +++ b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs : global::Pulumi.ResourceArgs + { + /// + /// ID of the job + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs() + { + } + public static new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty => new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs new file mode 100644 index 00000000..f928a3f9 --- /dev/null +++ b/sdk/dotnet/Inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs : global::Pulumi.ResourceArgs + { + /// + /// ID of the job + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs() + { + } + public static new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs Empty => new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTaskWebhookNotificationsArgs.cs b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsArgs.cs index b73ec34f..8f0951a4 100644 --- a/sdk/dotnet/Inputs/JobTaskWebhookNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsArgs.cs @@ -52,6 +52,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskWebhookNotificationsGetArgs.cs b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsGetArgs.cs index 58130d81..6608f709 100644 --- a/sdk/dotnet/Inputs/JobTaskWebhookNotificationsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsGetArgs.cs @@ -52,6 +52,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? 
(_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs new file mode 100644 index 00000000..591642bc --- /dev/null +++ b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs : global::Pulumi.ResourceArgs + { + /// + /// ID of the job + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs() + { + } + public static new JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty => new JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs new file mode 100644 index 00000000..e798c506 --- /dev/null +++ b/sdk/dotnet/Inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs : global::Pulumi.ResourceArgs + { + /// + /// ID of the job + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs() + { + } + public static new JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs Empty => new JobTaskWebhookNotificationsOnStreamingBacklogExceededGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTriggerArgs.cs b/sdk/dotnet/Inputs/JobTriggerArgs.cs index 98ca7bab..ab48337c 100644 --- a/sdk/dotnet/Inputs/JobTriggerArgs.cs +++ b/sdk/dotnet/Inputs/JobTriggerArgs.cs @@ -24,6 +24,9 @@ public sealed class JobTriggerArgs : global::Pulumi.ResourceArgs [Input("pauseStatus")] public Input? PauseStatus { get; set; } + [Input("periodic")] + public Input? Periodic { get; set; } + [Input("table")] public Input? Table { get; set; } diff --git a/sdk/dotnet/Inputs/JobTriggerGetArgs.cs b/sdk/dotnet/Inputs/JobTriggerGetArgs.cs index 62d9de6d..a589e145 100644 --- a/sdk/dotnet/Inputs/JobTriggerGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTriggerGetArgs.cs @@ -24,6 +24,9 @@ public sealed class JobTriggerGetArgs : global::Pulumi.ResourceArgs [Input("pauseStatus")] public Input? PauseStatus { get; set; } + [Input("periodic")] + public Input? Periodic { get; set; } + [Input("table")] public Input? Table { get; set; } diff --git a/sdk/dotnet/Inputs/JobTriggerPeriodicArgs.cs b/sdk/dotnet/Inputs/JobTriggerPeriodicArgs.cs new file mode 100644 index 00000000..69c7f9bd --- /dev/null +++ b/sdk/dotnet/Inputs/JobTriggerPeriodicArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTriggerPeriodicArgs : global::Pulumi.ResourceArgs + { + [Input("interval", required: true)] + public Input Interval { get; set; } = null!; + + [Input("unit", required: true)] + public Input Unit { get; set; } = null!; + + public JobTriggerPeriodicArgs() + { + } + public static new JobTriggerPeriodicArgs Empty => new JobTriggerPeriodicArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTriggerPeriodicGetArgs.cs b/sdk/dotnet/Inputs/JobTriggerPeriodicGetArgs.cs new file mode 100644 index 00000000..25824bb1 --- /dev/null +++ b/sdk/dotnet/Inputs/JobTriggerPeriodicGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTriggerPeriodicGetArgs : global::Pulumi.ResourceArgs + { + [Input("interval", required: true)] + public Input Interval { get; set; } = null!; + + [Input("unit", required: true)] + public Input Unit { get; set; } = null!; + + public JobTriggerPeriodicGetArgs() + { + } + public static new JobTriggerPeriodicGetArgs Empty => new JobTriggerPeriodicGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobWebhookNotificationsArgs.cs b/sdk/dotnet/Inputs/JobWebhookNotificationsArgs.cs index e2f9912d..cbbebdd1 100644 --- a/sdk/dotnet/Inputs/JobWebhookNotificationsArgs.cs +++ b/sdk/dotnet/Inputs/JobWebhookNotificationsArgs.cs @@ -52,6 +52,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? _onSuccesses; diff --git a/sdk/dotnet/Inputs/JobWebhookNotificationsGetArgs.cs b/sdk/dotnet/Inputs/JobWebhookNotificationsGetArgs.cs index 685b819d..de2f5654 100644 --- a/sdk/dotnet/Inputs/JobWebhookNotificationsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobWebhookNotificationsGetArgs.cs @@ -52,6 +52,14 @@ public InputList OnStarts set => _onStarts = value; } + [Input("onStreamingBacklogExceededs")] + private InputList? _onStreamingBacklogExceededs; + public InputList OnStreamingBacklogExceededs + { + get => _onStreamingBacklogExceededs ?? (_onStreamingBacklogExceededs = new InputList()); + set => _onStreamingBacklogExceededs = value; + } + [Input("onSuccesses")] private InputList? 
_onSuccesses; diff --git a/sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.cs b/sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.cs new file mode 100644 index 00000000..7ecbca52 --- /dev/null +++ b/sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobWebhookNotificationsOnStreamingBacklogExceededArgs : global::Pulumi.ResourceArgs + { + /// + /// ID of the job + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public JobWebhookNotificationsOnStreamingBacklogExceededArgs() + { + } + public static new JobWebhookNotificationsOnStreamingBacklogExceededArgs Empty => new JobWebhookNotificationsOnStreamingBacklogExceededArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs b/sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs new file mode 100644 index 00000000..ca048c2c --- /dev/null +++ b/sdk/dotnet/Inputs/JobWebhookNotificationsOnStreamingBacklogExceededGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobWebhookNotificationsOnStreamingBacklogExceededGetArgs : global::Pulumi.ResourceArgs + { + /// + /// ID of the job + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public JobWebhookNotificationsOnStreamingBacklogExceededGetArgs() + { + } + public static new JobWebhookNotificationsOnStreamingBacklogExceededGetArgs Empty => new JobWebhookNotificationsOnStreamingBacklogExceededGetArgs(); + } +} diff --git a/sdk/dotnet/IpAccessList.cs b/sdk/dotnet/IpAccessList.cs index 3081af2c..80328457 100644 --- a/sdk/dotnet/IpAccessList.cs +++ b/sdk/dotnet/IpAccessList.cs @@ -58,7 +58,7 @@ namespace Pulumi.Databricks /// The following resources are often used in the same context: /// /// * End to end workspace management guide. - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. /// * databricks.MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). 
/// * databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. diff --git a/sdk/dotnet/MwsCredentials.cs b/sdk/dotnet/MwsCredentials.cs index e3908050..0dd8975b 100644 --- a/sdk/dotnet/MwsCredentials.cs +++ b/sdk/dotnet/MwsCredentials.cs @@ -27,7 +27,7 @@ namespace Pulumi.Databricks /// * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. /// * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// /// ## Import /// diff --git a/sdk/dotnet/MwsCustomerManagedKeys.cs b/sdk/dotnet/MwsCustomerManagedKeys.cs index b5419670..9f16496a 100644 --- a/sdk/dotnet/MwsCustomerManagedKeys.cs +++ b/sdk/dotnet/MwsCustomerManagedKeys.cs @@ -373,7 +373,7 @@ namespace Pulumi.Databricks /// * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
/// * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// /// ## Import /// diff --git a/sdk/dotnet/MwsLogDelivery.cs b/sdk/dotnet/MwsLogDelivery.cs index 7e1b74a3..80816b1e 100644 --- a/sdk/dotnet/MwsLogDelivery.cs +++ b/sdk/dotnet/MwsLogDelivery.cs @@ -79,7 +79,7 @@ namespace Pulumi.Databricks /// * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. /// * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// /// ## Import /// @@ -149,7 +149,7 @@ public partial class MwsLogDelivery : global::Pulumi.CustomResource public Output StorageConfigurationId { get; private set; } = null!; /// - /// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. 
If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + /// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. /// [Output("workspaceIdsFilters")] public Output> WorkspaceIdsFilters { get; private set; } = null!; @@ -264,7 +264,7 @@ public sealed class MwsLogDeliveryArgs : global::Pulumi.ResourceArgs private InputList? _workspaceIdsFilters; /// - /// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + /// By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. /// public InputList WorkspaceIdsFilters { @@ -344,7 +344,7 @@ public sealed class MwsLogDeliveryState : global::Pulumi.ResourceArgs private InputList? _workspaceIdsFilters; /// - /// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + /// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. 
If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. /// public InputList WorkspaceIdsFilters { diff --git a/sdk/dotnet/MwsNetworks.cs b/sdk/dotnet/MwsNetworks.cs index 6d3b3845..a44592e9 100644 --- a/sdk/dotnet/MwsNetworks.cs +++ b/sdk/dotnet/MwsNetworks.cs @@ -72,13 +72,13 @@ namespace Pulumi.Databricks /// The following resources are used in the same context: /// /// * Provisioning Databricks on AWS guide. - /// * Provisioning Databricks on AWS with PrivateLink guide. - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + /// * Provisioning Databricks on AWS with Private Link guide. + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. /// * Provisioning Databricks on GCP guide. /// * Provisioning Databricks workspaces on GCP with Private Service Connect guide. /// * databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration. /// * databricks.MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html). - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
/// /// ## Import /// diff --git a/sdk/dotnet/MwsPrivateAccessSettings.cs b/sdk/dotnet/MwsPrivateAccessSettings.cs index 6b9afd5c..2a599e0e 100644 --- a/sdk/dotnet/MwsPrivateAccessSettings.cs +++ b/sdk/dotnet/MwsPrivateAccessSettings.cs @@ -114,12 +114,12 @@ namespace Pulumi.Databricks /// The following resources are used in the same context: /// /// * Provisioning Databricks on AWS guide. - /// * Provisioning Databricks on AWS with PrivateLink guide. - /// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + /// * Provisioning Databricks on AWS with Private Link guide. + /// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. /// * Provisioning Databricks workspaces on GCP with Private Service Connect guide. /// * databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration. /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// /// ## Import /// diff --git a/sdk/dotnet/MwsStorageConfigurations.cs b/sdk/dotnet/MwsStorageConfigurations.cs index bc290a8a..7721a3c0 100644 --- a/sdk/dotnet/MwsStorageConfigurations.cs +++ b/sdk/dotnet/MwsStorageConfigurations.cs @@ -24,12 +24,12 @@ namespace Pulumi.Databricks /// The following resources are used in the same context: /// /// * Provisioning Databricks on AWS guide. - /// * Provisioning Databricks on AWS with PrivateLink guide. + /// * Provisioning Databricks on AWS with Private Link guide. 
/// * databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS. /// * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. /// * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). /// * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - /// * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + /// * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). /// /// ## Import /// diff --git a/sdk/dotnet/OnlineTable.cs b/sdk/dotnet/OnlineTable.cs index dff115c1..b62d953f 100644 --- a/sdk/dotnet/OnlineTable.cs +++ b/sdk/dotnet/OnlineTable.cs @@ -72,6 +72,9 @@ public partial class OnlineTable : global::Pulumi.CustomResource [Output("statuses")] public Output> Statuses { get; private set; } = null!; + [Output("tableServingUrl")] + public Output TableServingUrl { get; private set; } = null!; + /// /// Create a OnlineTable resource with the given unique name, arguments, and options. @@ -130,6 +133,9 @@ public sealed class OnlineTableArgs : global::Pulumi.ResourceArgs [Input("spec")] public Input? Spec { get; set; } + [Input("tableServingUrl")] + public Input? TableServingUrl { get; set; } + public OnlineTableArgs() { } @@ -162,6 +168,9 @@ public InputList Statuses set => _statuses = value; } + [Input("tableServingUrl")] + public Input? 
TableServingUrl { get; set; } + public OnlineTableState() { } diff --git a/sdk/dotnet/Outputs/GetExternalLocationExternalLocationInfoResult.cs b/sdk/dotnet/Outputs/GetExternalLocationExternalLocationInfoResult.cs index d5f80125..dd2428b7 100644 --- a/sdk/dotnet/Outputs/GetExternalLocationExternalLocationInfoResult.cs +++ b/sdk/dotnet/Outputs/GetExternalLocationExternalLocationInfoResult.cs @@ -42,6 +42,7 @@ public sealed class GetExternalLocationExternalLocationInfoResult /// The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). /// public readonly Outputs.GetExternalLocationExternalLocationInfoEncryptionDetailsResult? EncryptionDetails; + public readonly string? IsolationMode; /// /// Unique identifier of the parent Metastore. /// @@ -89,6 +90,8 @@ private GetExternalLocationExternalLocationInfoResult( Outputs.GetExternalLocationExternalLocationInfoEncryptionDetailsResult? encryptionDetails, + string? isolationMode, + string? metastoreId, string? 
name, @@ -111,6 +114,7 @@ private GetExternalLocationExternalLocationInfoResult( CredentialId = credentialId; CredentialName = credentialName; EncryptionDetails = encryptionDetails; + IsolationMode = isolationMode; MetastoreId = metastoreId; Name = name; Owner = owner; diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsEmailNotificationsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsEmailNotificationsResult.cs index 1d783d3d..b01afcfb 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsEmailNotificationsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsEmailNotificationsResult.cs @@ -17,6 +17,7 @@ public sealed class GetJobJobSettingsSettingsEmailNotificationsResult public readonly ImmutableArray OnDurationWarningThresholdExceededs; public readonly ImmutableArray OnFailures; public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; public readonly ImmutableArray OnSuccesses; [OutputConstructor] @@ -29,12 +30,15 @@ private GetJobJobSettingsSettingsEmailNotificationsResult( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { NoAlertForSkippedRuns = noAlertForSkippedRuns; OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskEmailNotificationsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskEmailNotificationsResult.cs index 7e34d534..a66741f5 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskEmailNotificationsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskEmailNotificationsResult.cs @@ -17,6 +17,7 @@ public sealed class GetJobJobSettingsSettingsTaskEmailNotificationsResult public readonly ImmutableArray OnDurationWarningThresholdExceededs; public readonly ImmutableArray OnFailures; public 
readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; public readonly ImmutableArray OnSuccesses; [OutputConstructor] @@ -29,12 +30,15 @@ private GetJobJobSettingsSettingsTaskEmailNotificationsResult( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { NoAlertForSkippedRuns = noAlertForSkippedRuns; OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsResult.cs index 59e20b8f..9e523a0a 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsResult.cs @@ -17,6 +17,7 @@ public sealed class GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificatio public readonly ImmutableArray OnDurationWarningThresholdExceededs; public readonly ImmutableArray OnFailures; public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; public readonly ImmutableArray OnSuccesses; [OutputConstructor] @@ -29,12 +30,15 @@ private GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsResult( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { NoAlertForSkippedRuns = noAlertForSkippedRuns; OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs 
b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs new file mode 100644 index 00000000..035e3e6c --- /dev/null +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult + { + /// + /// the id of databricks.Job if the resource was matched by name. + /// + public readonly string Id; + + [OutputConstructor] + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsResult.cs index aa8dc71f..c9696f03 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsResult.cs @@ -16,6 +16,7 @@ public sealed class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificat public readonly ImmutableArray OnDurationWarningThresholdExceededs; public readonly ImmutableArray OnFailures; public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; public readonly ImmutableArray OnSuccesses; [OutputConstructor] @@ -26,11 +27,14 @@ private 
GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsResult( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs new file mode 100644 index 00000000..44b2e18f --- /dev/null +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult + { + /// + /// the id of databricks.Job if the resource was matched by name. 
+ /// + public readonly string Id; + + [OutputConstructor] + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsResult.cs index 1537a0fd..116da91b 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsResult.cs @@ -16,6 +16,7 @@ public sealed class GetJobJobSettingsSettingsTaskWebhookNotificationsResult public readonly ImmutableArray OnDurationWarningThresholdExceededs; public readonly ImmutableArray OnFailures; public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; public readonly ImmutableArray OnSuccesses; [OutputConstructor] @@ -26,11 +27,14 @@ private GetJobJobSettingsSettingsTaskWebhookNotificationsResult( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult.cs new file mode 100644 index 00000000..4ead7be0 --- /dev/null +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult + { + /// + /// the id of databricks.Job if the resource was matched by name. + /// + public readonly string Id; + + [OutputConstructor] + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsResult.cs index 80157fa6..4c34d133 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsWebhookNotificationsResult.cs @@ -16,6 +16,7 @@ public sealed class GetJobJobSettingsSettingsWebhookNotificationsResult public readonly ImmutableArray OnDurationWarningThresholdExceededs; public readonly ImmutableArray OnFailures; public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; public readonly ImmutableArray OnSuccesses; [OutputConstructor] @@ -26,11 +27,14 @@ private GetJobJobSettingsSettingsWebhookNotificationsResult( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/GetStorageCredentialStorageCredentialInfoResult.cs b/sdk/dotnet/Outputs/GetStorageCredentialStorageCredentialInfoResult.cs index 7ba62056..0b781afe 100644 --- a/sdk/dotnet/Outputs/GetStorageCredentialStorageCredentialInfoResult.cs +++ 
b/sdk/dotnet/Outputs/GetStorageCredentialStorageCredentialInfoResult.cs @@ -43,6 +43,7 @@ public sealed class GetStorageCredentialStorageCredentialInfoResult /// Unique ID of storage credential. /// public readonly string? Id; + public readonly string? IsolationMode; /// /// Unique identifier of the parent Metastore. /// @@ -89,6 +90,8 @@ private GetStorageCredentialStorageCredentialInfoResult( string? id, + string? isolationMode, + string? metastoreId, string? name, @@ -112,6 +115,7 @@ private GetStorageCredentialStorageCredentialInfoResult( CreatedBy = createdBy; DatabricksGcpServiceAccount = databricksGcpServiceAccount; Id = id; + IsolationMode = isolationMode; MetastoreId = metastoreId; Name = name; Owner = owner; diff --git a/sdk/dotnet/Outputs/JobEmailNotifications.cs b/sdk/dotnet/Outputs/JobEmailNotifications.cs index 367b27d0..88a38772 100644 --- a/sdk/dotnet/Outputs/JobEmailNotifications.cs +++ b/sdk/dotnet/Outputs/JobEmailNotifications.cs @@ -31,6 +31,7 @@ public sealed class JobEmailNotifications /// (List) list of emails to notify when the run starts. /// public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; /// /// (List) list of emails to notify when the run completes successfully. 
/// @@ -46,12 +47,15 @@ private JobEmailNotifications( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { NoAlertForSkippedRuns = noAlertForSkippedRuns; OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/JobTaskEmailNotifications.cs b/sdk/dotnet/Outputs/JobTaskEmailNotifications.cs index f2e2868c..0edfc8bb 100644 --- a/sdk/dotnet/Outputs/JobTaskEmailNotifications.cs +++ b/sdk/dotnet/Outputs/JobTaskEmailNotifications.cs @@ -31,6 +31,7 @@ public sealed class JobTaskEmailNotifications /// (List) list of emails to notify when the run starts. /// public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; /// /// (List) list of emails to notify when the run completes successfully. /// @@ -46,12 +47,15 @@ private JobTaskEmailNotifications( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { NoAlertForSkippedRuns = noAlertForSkippedRuns; OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/JobTaskForEachTaskTaskEmailNotifications.cs b/sdk/dotnet/Outputs/JobTaskForEachTaskTaskEmailNotifications.cs index b0e58702..e63532d0 100644 --- a/sdk/dotnet/Outputs/JobTaskForEachTaskTaskEmailNotifications.cs +++ b/sdk/dotnet/Outputs/JobTaskForEachTaskTaskEmailNotifications.cs @@ -31,6 +31,7 @@ public sealed class JobTaskForEachTaskTaskEmailNotifications /// (List) list of emails to notify when the run starts. 
/// public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; /// /// (List) list of emails to notify when the run completes successfully. /// @@ -46,12 +47,15 @@ private JobTaskForEachTaskTaskEmailNotifications( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { NoAlertForSkippedRuns = noAlertForSkippedRuns; OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotifications.cs b/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotifications.cs index c12032bc..a5484167 100644 --- a/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotifications.cs +++ b/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotifications.cs @@ -29,6 +29,7 @@ public sealed class JobTaskForEachTaskTaskWebhookNotifications /// (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. /// public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; /// /// (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
/// @@ -42,11 +43,14 @@ private JobTaskForEachTaskTaskWebhookNotifications( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs b/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs new file mode 100644 index 00000000..79d497e1 --- /dev/null +++ b/sdk/dotnet/Outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded + { + /// + /// ID of the job + /// + public readonly string Id; + + [OutputConstructor] + private JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/JobTaskWebhookNotifications.cs b/sdk/dotnet/Outputs/JobTaskWebhookNotifications.cs index fe05fc9e..caf90079 100644 --- a/sdk/dotnet/Outputs/JobTaskWebhookNotifications.cs +++ b/sdk/dotnet/Outputs/JobTaskWebhookNotifications.cs @@ -29,6 +29,7 @@ public sealed class JobTaskWebhookNotifications /// (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. 
/// public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; /// /// (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. /// @@ -42,11 +43,14 @@ private JobTaskWebhookNotifications( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.cs b/sdk/dotnet/Outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.cs new file mode 100644 index 00000000..1fba520a --- /dev/null +++ b/sdk/dotnet/Outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobTaskWebhookNotificationsOnStreamingBacklogExceeded + { + /// + /// ID of the job + /// + public readonly string Id; + + [OutputConstructor] + private JobTaskWebhookNotificationsOnStreamingBacklogExceeded(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/JobTrigger.cs b/sdk/dotnet/Outputs/JobTrigger.cs index e94f3689..ebac2305 100644 --- a/sdk/dotnet/Outputs/JobTrigger.cs +++ b/sdk/dotnet/Outputs/JobTrigger.cs @@ -21,6 +21,7 @@ public sealed class JobTrigger /// Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. 
When the `pause_status` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pause_status`. /// public readonly string? PauseStatus; + public readonly Outputs.JobTriggerPeriodic? Periodic; public readonly Outputs.JobTriggerTable? Table; public readonly Outputs.JobTriggerTableUpdate? TableUpdate; @@ -30,12 +31,15 @@ private JobTrigger( string? pauseStatus, + Outputs.JobTriggerPeriodic? periodic, + Outputs.JobTriggerTable? table, Outputs.JobTriggerTableUpdate? tableUpdate) { FileArrival = fileArrival; PauseStatus = pauseStatus; + Periodic = periodic; Table = table; TableUpdate = tableUpdate; } diff --git a/sdk/dotnet/Outputs/JobTriggerPeriodic.cs b/sdk/dotnet/Outputs/JobTriggerPeriodic.cs new file mode 100644 index 00000000..f0251e76 --- /dev/null +++ b/sdk/dotnet/Outputs/JobTriggerPeriodic.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobTriggerPeriodic + { + public readonly int Interval; + public readonly string Unit; + + [OutputConstructor] + private JobTriggerPeriodic( + int interval, + + string unit) + { + Interval = interval; + Unit = unit; + } + } +} diff --git a/sdk/dotnet/Outputs/JobWebhookNotifications.cs b/sdk/dotnet/Outputs/JobWebhookNotifications.cs index 2a5c1309..d8a151b7 100644 --- a/sdk/dotnet/Outputs/JobWebhookNotifications.cs +++ b/sdk/dotnet/Outputs/JobWebhookNotifications.cs @@ -29,6 +29,7 @@ public sealed class JobWebhookNotifications /// (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. 
/// public readonly ImmutableArray OnStarts; + public readonly ImmutableArray OnStreamingBacklogExceededs; /// /// (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. /// @@ -42,11 +43,14 @@ private JobWebhookNotifications( ImmutableArray onStarts, + ImmutableArray onStreamingBacklogExceededs, + ImmutableArray onSuccesses) { OnDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; OnFailures = onFailures; OnStarts = onStarts; + OnStreamingBacklogExceededs = onStreamingBacklogExceededs; OnSuccesses = onSuccesses; } } diff --git a/sdk/dotnet/Outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.cs b/sdk/dotnet/Outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.cs new file mode 100644 index 00000000..682c6b46 --- /dev/null +++ b/sdk/dotnet/Outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobWebhookNotificationsOnStreamingBacklogExceeded + { + /// + /// ID of the job + /// + public readonly string Id; + + [OutputConstructor] + private JobWebhookNotificationsOnStreamingBacklogExceeded(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Provider.cs b/sdk/dotnet/Provider.cs index 35f2d334..afac97ac 100644 --- a/sdk/dotnet/Provider.cs +++ b/sdk/dotnet/Provider.cs @@ -75,6 +75,9 @@ public partial class Provider : global::Pulumi.ProviderResource [Output("profile")] public Output Profile { get; private set; } = null!; + [Output("serverlessComputeId")] + public Output ServerlessComputeId { get; private set; } = null!; + [Output("token")] public Output Token { get; private set; } = null!; @@ -241,6 +244,9 @@ public Input? Password [Input("retryTimeoutSeconds", json: true)] public Input? RetryTimeoutSeconds { get; set; } + [Input("serverlessComputeId")] + public Input? ServerlessComputeId { get; set; } + [Input("skipVerify", json: true)] public Input? SkipVerify { get; set; } diff --git a/sdk/dotnet/SqlPermissions.cs b/sdk/dotnet/SqlPermissions.cs index 71c51668..2e7508e9 100644 --- a/sdk/dotnet/SqlPermissions.cs +++ b/sdk/dotnet/SqlPermissions.cs @@ -108,6 +108,9 @@ public partial class SqlPermissions : global::Pulumi.CustomResource [Output("catalog")] public Output Catalog { get; private set; } = null!; + /// + /// Id of an existing databricks_cluster, otherwise resource creation will fail. + /// [Output("clusterId")] public Output ClusterId { get; private set; } = null!; @@ -196,6 +199,9 @@ public sealed class SqlPermissionsArgs : global::Pulumi.ResourceArgs [Input("catalog")] public Input? Catalog { get; set; } + /// + /// Id of an existing databricks_cluster, otherwise resource creation will fail. 
+ /// [Input("clusterId")] public Input? ClusterId { get; set; } @@ -251,6 +257,9 @@ public sealed class SqlPermissionsState : global::Pulumi.ResourceArgs [Input("catalog")] public Input? Catalog { get; set; } + /// + /// Id of an existing databricks_cluster, otherwise resource creation will fail. + /// [Input("clusterId")] public Input? ClusterId { get; set; } diff --git a/sdk/go/databricks/config/config.go b/sdk/go/databricks/config/config.go index adfb4fe7..1accd026 100644 --- a/sdk/go/databricks/config/config.go +++ b/sdk/go/databricks/config/config.go @@ -86,6 +86,9 @@ func GetRateLimit(ctx *pulumi.Context) int { func GetRetryTimeoutSeconds(ctx *pulumi.Context) int { return config.GetInt(ctx, "databricks:retryTimeoutSeconds") } +func GetServerlessComputeId(ctx *pulumi.Context) string { + return config.Get(ctx, "databricks:serverlessComputeId") +} func GetSkipVerify(ctx *pulumi.Context) bool { return config.GetBool(ctx, "databricks:skipVerify") } diff --git a/sdk/go/databricks/getAwsAssumeRolePolicy.go b/sdk/go/databricks/getAwsAssumeRolePolicy.go index 1f9eead6..9de68eba 100644 --- a/sdk/go/databricks/getAwsAssumeRolePolicy.go +++ b/sdk/go/databricks/getAwsAssumeRolePolicy.go @@ -87,7 +87,7 @@ import ( // // The following resources are used in the same context: // -// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide +// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide // * getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. // * getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). 
func GetAwsAssumeRolePolicy(ctx *pulumi.Context, args *GetAwsAssumeRolePolicyArgs, opts ...pulumi.InvokeOption) (*GetAwsAssumeRolePolicyResult, error) { diff --git a/sdk/go/databricks/getAwsBucketPolicy.go b/sdk/go/databricks/getAwsBucketPolicy.go index 9769a5b2..fb749f03 100644 --- a/sdk/go/databricks/getAwsBucketPolicy.go +++ b/sdk/go/databricks/getAwsBucketPolicy.go @@ -27,7 +27,7 @@ type GetAwsBucketPolicyArgs struct { // AWS S3 Bucket name for which to generate the policy document. Bucket string `pulumi:"bucket"` DatabricksAccountId *string `pulumi:"databricksAccountId"` - // Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + // Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket DatabricksE2AccountId *string `pulumi:"databricksE2AccountId"` // Data access role that can have full access for this bucket FullAccessRole *string `pulumi:"fullAccessRole"` @@ -63,7 +63,7 @@ type GetAwsBucketPolicyOutputArgs struct { // AWS S3 Bucket name for which to generate the policy document. Bucket pulumi.StringInput `pulumi:"bucket"` DatabricksAccountId pulumi.StringPtrInput `pulumi:"databricksAccountId"` - // Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + // Your Databricks account ID. 
Used to generate restrictive IAM policies that will increase the security of your root bucket DatabricksE2AccountId pulumi.StringPtrInput `pulumi:"databricksE2AccountId"` // Data access role that can have full access for this bucket FullAccessRole pulumi.StringPtrInput `pulumi:"fullAccessRole"` diff --git a/sdk/go/databricks/getAwsCrossAccountPolicy.go b/sdk/go/databricks/getAwsCrossAccountPolicy.go index 8efc4c91..a3f0f594 100644 --- a/sdk/go/databricks/getAwsCrossAccountPolicy.go +++ b/sdk/go/databricks/getAwsCrossAccountPolicy.go @@ -45,7 +45,7 @@ import ( // // The following resources are used in the same context: // -// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide +// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide // * getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. // * getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. // * InstanceProfile to manage AWS EC2 instance profiles that users can launch Cluster and access data, like databricks_mount. diff --git a/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go b/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go index e11f33e7..193f9d24 100644 --- a/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go +++ b/sdk/go/databricks/getAwsUnityCatalogAssumeRolePolicy.go @@ -13,7 +13,7 @@ import ( // > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. // -// This data source constructs necessary AWS Unity Catalog assume role policy for you. +// This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
// // ## Example Usage // @@ -58,7 +58,7 @@ import ( // } // _, err = iam.NewRole(ctx, "metastore_data_access", &iam.RoleArgs{ // Name: pulumi.String(fmt.Sprintf("%v-uc-access", prefix)), -// AssumeRolePolicy: pulumi.Any(passroleForUc.Json), +// AssumeRolePolicy: pulumi.Any(thisAwsIamPolicyDocument.Json), // ManagedPolicyArns: pulumi.StringArray{ // unityMetastore.Arn, // }, @@ -87,7 +87,7 @@ type GetAwsUnityCatalogAssumeRolePolicyArgs struct { AwsAccountId string `pulumi:"awsAccountId"` // The storage credential external id. ExternalId string `pulumi:"externalId"` - // The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + // The name of the AWS IAM role to be created for Unity Catalog. RoleName string `pulumi:"roleName"` // The Databricks Unity Catalog IAM Role ARN. Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` UnityCatalogIamArn *string `pulumi:"unityCatalogIamArn"` @@ -123,7 +123,7 @@ type GetAwsUnityCatalogAssumeRolePolicyOutputArgs struct { AwsAccountId pulumi.StringInput `pulumi:"awsAccountId"` // The storage credential external id. ExternalId pulumi.StringInput `pulumi:"externalId"` - // The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + // The name of the AWS IAM role to be created for Unity Catalog. RoleName pulumi.StringInput `pulumi:"roleName"` // The Databricks Unity Catalog IAM Role ARN. 
Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` UnityCatalogIamArn pulumi.StringPtrInput `pulumi:"unityCatalogIamArn"` diff --git a/sdk/go/databricks/getAwsUnityCatalogPolicy.go b/sdk/go/databricks/getAwsUnityCatalogPolicy.go index bb686410..914fbd56 100644 --- a/sdk/go/databricks/getAwsUnityCatalogPolicy.go +++ b/sdk/go/databricks/getAwsUnityCatalogPolicy.go @@ -13,7 +13,7 @@ import ( // > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. // -// This data source constructs necessary AWS Unity Catalog policy for you. +// This data source constructs the necessary AWS Unity Catalog policy for you. // // ## Example Usage // @@ -58,7 +58,7 @@ import ( // } // _, err = iam.NewRole(ctx, "metastore_data_access", &iam.RoleArgs{ // Name: pulumi.String(fmt.Sprintf("%v-uc-access", prefix)), -// AssumeRolePolicy: pulumi.Any(passroleForUc.Json), +// AssumeRolePolicy: pulumi.Any(thisAwsIamPolicyDocument.Json), // ManagedPolicyArns: pulumi.StringArray{ // unityMetastore.Arn, // }, diff --git a/sdk/go/databricks/getMwsCredentials.go b/sdk/go/databricks/getMwsCredentials.go index 2ff84423..d61496a5 100644 --- a/sdk/go/databricks/getMwsCredentials.go +++ b/sdk/go/databricks/getMwsCredentials.go @@ -53,7 +53,7 @@ import ( // * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
// * MwsStorageConfigurations to configure root bucket new workspaces within AWS. -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). +// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). func LookupMwsCredentials(ctx *pulumi.Context, args *LookupMwsCredentialsArgs, opts ...pulumi.InvokeOption) (*LookupMwsCredentialsResult, error) { opts = internal.PkgInvokeDefaultOpts(opts) var rv LookupMwsCredentialsResult diff --git a/sdk/go/databricks/getMwsWorkspaces.go b/sdk/go/databricks/getMwsWorkspaces.go index c16a99b9..93e9cb63 100644 --- a/sdk/go/databricks/getMwsWorkspaces.go +++ b/sdk/go/databricks/getMwsWorkspaces.go @@ -48,7 +48,7 @@ import ( // // The following resources are used in the same context: // -// * MwsWorkspaces to manage Databricks E2 Workspaces. +// * MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. // * MetastoreAssignment func LookupMwsWorkspaces(ctx *pulumi.Context, args *LookupMwsWorkspacesArgs, opts ...pulumi.InvokeOption) (*LookupMwsWorkspacesResult, error) { opts = internal.PkgInvokeDefaultOpts(opts) diff --git a/sdk/go/databricks/ipAccessList.go b/sdk/go/databricks/ipAccessList.go index 370d7556..d964e07c 100644 --- a/sdk/go/databricks/ipAccessList.go +++ b/sdk/go/databricks/ipAccessList.go @@ -63,7 +63,7 @@ import ( // The following resources are often used in the same context: // // * End to end workspace management guide. -// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. +// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
// * MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). // * Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. diff --git a/sdk/go/databricks/mwsCredentials.go b/sdk/go/databricks/mwsCredentials.go index fd1e116e..f6d3adf3 100644 --- a/sdk/go/databricks/mwsCredentials.go +++ b/sdk/go/databricks/mwsCredentials.go @@ -29,7 +29,7 @@ import ( // * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. // * MwsStorageConfigurations to configure root bucket new workspaces within AWS. -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). +// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
// // ## Import // diff --git a/sdk/go/databricks/mwsCustomerManagedKeys.go b/sdk/go/databricks/mwsCustomerManagedKeys.go index 605a348d..fa89cf90 100644 --- a/sdk/go/databricks/mwsCustomerManagedKeys.go +++ b/sdk/go/databricks/mwsCustomerManagedKeys.go @@ -366,7 +366,7 @@ import ( // * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. // * MwsStorageConfigurations to configure root bucket new workspaces within AWS. -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). +// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). // // ## Import // diff --git a/sdk/go/databricks/mwsLogDelivery.go b/sdk/go/databricks/mwsLogDelivery.go index 272a6f36..f63246af 100644 --- a/sdk/go/databricks/mwsLogDelivery.go +++ b/sdk/go/databricks/mwsLogDelivery.go @@ -97,7 +97,7 @@ import ( // * MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. // * MwsStorageConfigurations to configure root bucket new workspaces within AWS. -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). +// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
// // ## Import // @@ -125,7 +125,7 @@ type MwsLogDelivery struct { Status pulumi.StringOutput `pulumi:"status"` // The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. StorageConfigurationId pulumi.StringOutput `pulumi:"storageConfigurationId"` - // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. WorkspaceIdsFilters pulumi.IntArrayOutput `pulumi:"workspaceIdsFilters"` } @@ -194,7 +194,7 @@ type mwsLogDeliveryState struct { Status *string `pulumi:"status"` // The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. 
StorageConfigurationId *string `pulumi:"storageConfigurationId"` - // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. WorkspaceIdsFilters []int `pulumi:"workspaceIdsFilters"` } @@ -219,7 +219,7 @@ type MwsLogDeliveryState struct { Status pulumi.StringPtrInput // The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. StorageConfigurationId pulumi.StringPtrInput - // By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. WorkspaceIdsFilters pulumi.IntArrayInput } @@ -248,7 +248,7 @@ type mwsLogDeliveryArgs struct { Status *string `pulumi:"status"` // The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. StorageConfigurationId string `pulumi:"storageConfigurationId"` - // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. 
If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. WorkspaceIdsFilters []int `pulumi:"workspaceIdsFilters"` } @@ -274,7 +274,7 @@ type MwsLogDeliveryArgs struct { Status pulumi.StringPtrInput // The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. StorageConfigurationId pulumi.StringInput - // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. 
+ // By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. WorkspaceIdsFilters pulumi.IntArrayInput } @@ -415,7 +415,7 @@ func (o MwsLogDeliveryOutput) StorageConfigurationId() pulumi.StringOutput { return o.ApplyT(func(v *MwsLogDelivery) pulumi.StringOutput { return v.StorageConfigurationId }).(pulumi.StringOutput) } -// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. +// By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. 
If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. func (o MwsLogDeliveryOutput) WorkspaceIdsFilters() pulumi.IntArrayOutput { return o.ApplyT(func(v *MwsLogDelivery) pulumi.IntArrayOutput { return v.WorkspaceIdsFilters }).(pulumi.IntArrayOutput) } diff --git a/sdk/go/databricks/mwsNetworks.go b/sdk/go/databricks/mwsNetworks.go index 3f61bbe0..a9c319ab 100644 --- a/sdk/go/databricks/mwsNetworks.go +++ b/sdk/go/databricks/mwsNetworks.go @@ -75,13 +75,13 @@ import ( // The following resources are used in the same context: // // * Provisioning Databricks on AWS guide. -// * Provisioning Databricks on AWS with PrivateLink guide. -// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. +// * Provisioning Databricks on AWS with Private Link guide. +// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. // * Provisioning Databricks on GCP guide. // * Provisioning Databricks workspaces on GCP with Private Service Connect guide. // * MwsVpcEndpoint resources with Databricks such that they can be used as part of a MwsNetworks configuration. // * MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html). -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
+// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). // // ## Import // diff --git a/sdk/go/databricks/mwsPrivateAccessSettings.go b/sdk/go/databricks/mwsPrivateAccessSettings.go index 0f4905ad..7ca07197 100644 --- a/sdk/go/databricks/mwsPrivateAccessSettings.go +++ b/sdk/go/databricks/mwsPrivateAccessSettings.go @@ -131,12 +131,12 @@ import ( // The following resources are used in the same context: // // * Provisioning Databricks on AWS guide. -// * Provisioning Databricks on AWS with PrivateLink guide. -// * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. +// * Provisioning Databricks on AWS with Private Link guide. +// * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. // * Provisioning Databricks workspaces on GCP with Private Service Connect guide. // * MwsVpcEndpoint resources with Databricks such that they can be used as part of a MwsNetworks configuration. // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). +// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). // // ## Import // diff --git a/sdk/go/databricks/mwsStorageConfigurations.go b/sdk/go/databricks/mwsStorageConfigurations.go index 6ccf5e08..916e5c19 100644 --- a/sdk/go/databricks/mwsStorageConfigurations.go +++ b/sdk/go/databricks/mwsStorageConfigurations.go @@ -26,12 +26,12 @@ import ( // The following resources are used in the same context: // // * Provisioning Databricks on AWS guide. -// * Provisioning Databricks on AWS with PrivateLink guide. 
+// * Provisioning Databricks on AWS with Private Link guide. // * MwsCredentials to configure the cross-account role for creation of new workspaces within AWS. // * MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. // * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). // * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. -// * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). +// * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). // // ## Import // diff --git a/sdk/go/databricks/onlineTable.go b/sdk/go/databricks/onlineTable.go index badee6ef..ad771b3f 100644 --- a/sdk/go/databricks/onlineTable.go +++ b/sdk/go/databricks/onlineTable.go @@ -65,7 +65,8 @@ type OnlineTable struct { // object containing specification of the online table: Spec OnlineTableSpecPtrOutput `pulumi:"spec"` // object describing status of the online table: - Statuses OnlineTableStatusArrayOutput `pulumi:"statuses"` + Statuses OnlineTableStatusArrayOutput `pulumi:"statuses"` + TableServingUrl pulumi.StringPtrOutput `pulumi:"tableServingUrl"` } // NewOnlineTable registers a new resource with the given unique name, arguments, and options. 
@@ -103,7 +104,8 @@ type onlineTableState struct { // object containing specification of the online table: Spec *OnlineTableSpec `pulumi:"spec"` // object describing status of the online table: - Statuses []OnlineTableStatus `pulumi:"statuses"` + Statuses []OnlineTableStatus `pulumi:"statuses"` + TableServingUrl *string `pulumi:"tableServingUrl"` } type OnlineTableState struct { @@ -112,7 +114,8 @@ type OnlineTableState struct { // object containing specification of the online table: Spec OnlineTableSpecPtrInput // object describing status of the online table: - Statuses OnlineTableStatusArrayInput + Statuses OnlineTableStatusArrayInput + TableServingUrl pulumi.StringPtrInput } func (OnlineTableState) ElementType() reflect.Type { @@ -123,7 +126,8 @@ type onlineTableArgs struct { // 3-level name of the Online Table to create. Name *string `pulumi:"name"` // object containing specification of the online table: - Spec *OnlineTableSpec `pulumi:"spec"` + Spec *OnlineTableSpec `pulumi:"spec"` + TableServingUrl *string `pulumi:"tableServingUrl"` } // The set of arguments for constructing a OnlineTable resource. @@ -131,7 +135,8 @@ type OnlineTableArgs struct { // 3-level name of the Online Table to create. 
Name pulumi.StringPtrInput // object containing specification of the online table: - Spec OnlineTableSpecPtrInput + Spec OnlineTableSpecPtrInput + TableServingUrl pulumi.StringPtrInput } func (OnlineTableArgs) ElementType() reflect.Type { @@ -236,6 +241,10 @@ func (o OnlineTableOutput) Statuses() OnlineTableStatusArrayOutput { return o.ApplyT(func(v *OnlineTable) OnlineTableStatusArrayOutput { return v.Statuses }).(OnlineTableStatusArrayOutput) } +func (o OnlineTableOutput) TableServingUrl() pulumi.StringPtrOutput { + return o.ApplyT(func(v *OnlineTable) pulumi.StringPtrOutput { return v.TableServingUrl }).(pulumi.StringPtrOutput) +} + type OnlineTableArrayOutput struct{ *pulumi.OutputState } func (OnlineTableArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/provider.go b/sdk/go/databricks/provider.go index 07054ce6..e9bf33e7 100644 --- a/sdk/go/databricks/provider.go +++ b/sdk/go/databricks/provider.go @@ -37,6 +37,7 @@ type Provider struct { MetadataServiceUrl pulumi.StringPtrOutput `pulumi:"metadataServiceUrl"` Password pulumi.StringPtrOutput `pulumi:"password"` Profile pulumi.StringPtrOutput `pulumi:"profile"` + ServerlessComputeId pulumi.StringPtrOutput `pulumi:"serverlessComputeId"` Token pulumi.StringPtrOutput `pulumi:"token"` Username pulumi.StringPtrOutput `pulumi:"username"` WarehouseId pulumi.StringPtrOutput `pulumi:"warehouseId"` @@ -111,6 +112,7 @@ type providerArgs struct { Profile *string `pulumi:"profile"` RateLimit *int `pulumi:"rateLimit"` RetryTimeoutSeconds *int `pulumi:"retryTimeoutSeconds"` + ServerlessComputeId *string `pulumi:"serverlessComputeId"` SkipVerify *bool `pulumi:"skipVerify"` Token *string `pulumi:"token"` Username *string `pulumi:"username"` @@ -144,6 +146,7 @@ type ProviderArgs struct { Profile pulumi.StringPtrInput RateLimit pulumi.IntPtrInput RetryTimeoutSeconds pulumi.IntPtrInput + ServerlessComputeId pulumi.StringPtrInput SkipVerify pulumi.BoolPtrInput Token pulumi.StringPtrInput Username 
pulumi.StringPtrInput @@ -263,6 +266,10 @@ func (o ProviderOutput) Profile() pulumi.StringPtrOutput { return o.ApplyT(func(v *Provider) pulumi.StringPtrOutput { return v.Profile }).(pulumi.StringPtrOutput) } +func (o ProviderOutput) ServerlessComputeId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Provider) pulumi.StringPtrOutput { return v.ServerlessComputeId }).(pulumi.StringPtrOutput) +} + func (o ProviderOutput) Token() pulumi.StringPtrOutput { return o.ApplyT(func(v *Provider) pulumi.StringPtrOutput { return v.Token }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/databricks/pulumiTypes.go b/sdk/go/databricks/pulumiTypes.go index 4200283d..84441012 100644 --- a/sdk/go/databricks/pulumiTypes.go +++ b/sdk/go/databricks/pulumiTypes.go @@ -9477,7 +9477,8 @@ type JobEmailNotifications struct { // (List) list of emails to notify when the run fails. OnFailures []string `pulumi:"onFailures"` // (List) list of emails to notify when the run starts. - OnStarts []string `pulumi:"onStarts"` + OnStarts []string `pulumi:"onStarts"` + OnStreamingBacklogExceededs []string `pulumi:"onStreamingBacklogExceededs"` // (List) list of emails to notify when the run completes successfully. OnSuccesses []string `pulumi:"onSuccesses"` } @@ -9503,7 +9504,8 @@ type JobEmailNotificationsArgs struct { // (List) list of emails to notify when the run fails. OnFailures pulumi.StringArrayInput `pulumi:"onFailures"` // (List) list of emails to notify when the run starts. - OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs pulumi.StringArrayInput `pulumi:"onStreamingBacklogExceededs"` // (List) list of emails to notify when the run completes successfully. 
OnSuccesses pulumi.StringArrayInput `pulumi:"onSuccesses"` } @@ -9607,6 +9609,10 @@ func (o JobEmailNotificationsOutput) OnStarts() pulumi.StringArrayOutput { return o.ApplyT(func(v JobEmailNotifications) []string { return v.OnStarts }).(pulumi.StringArrayOutput) } +func (o JobEmailNotificationsOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v JobEmailNotifications) []string { return v.OnStreamingBacklogExceededs }).(pulumi.StringArrayOutput) +} + // (List) list of emails to notify when the run completes successfully. func (o JobEmailNotificationsOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v JobEmailNotifications) []string { return v.OnSuccesses }).(pulumi.StringArrayOutput) @@ -9678,6 +9684,15 @@ func (o JobEmailNotificationsPtrOutput) OnStarts() pulumi.StringArrayOutput { }).(pulumi.StringArrayOutput) } +func (o JobEmailNotificationsPtrOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v *JobEmailNotifications) []string { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + // (List) list of emails to notify when the run completes successfully. func (o JobEmailNotificationsPtrOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v *JobEmailNotifications) []string { @@ -23044,7 +23059,8 @@ type JobTaskEmailNotifications struct { // (List) list of emails to notify when the run fails. OnFailures []string `pulumi:"onFailures"` // (List) list of emails to notify when the run starts. - OnStarts []string `pulumi:"onStarts"` + OnStarts []string `pulumi:"onStarts"` + OnStreamingBacklogExceededs []string `pulumi:"onStreamingBacklogExceededs"` // (List) list of emails to notify when the run completes successfully. OnSuccesses []string `pulumi:"onSuccesses"` } @@ -23070,7 +23086,8 @@ type JobTaskEmailNotificationsArgs struct { // (List) list of emails to notify when the run fails. 
OnFailures pulumi.StringArrayInput `pulumi:"onFailures"` // (List) list of emails to notify when the run starts. - OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs pulumi.StringArrayInput `pulumi:"onStreamingBacklogExceededs"` // (List) list of emails to notify when the run completes successfully. OnSuccesses pulumi.StringArrayInput `pulumi:"onSuccesses"` } @@ -23174,6 +23191,10 @@ func (o JobTaskEmailNotificationsOutput) OnStarts() pulumi.StringArrayOutput { return o.ApplyT(func(v JobTaskEmailNotifications) []string { return v.OnStarts }).(pulumi.StringArrayOutput) } +func (o JobTaskEmailNotificationsOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v JobTaskEmailNotifications) []string { return v.OnStreamingBacklogExceededs }).(pulumi.StringArrayOutput) +} + // (List) list of emails to notify when the run completes successfully. func (o JobTaskEmailNotificationsOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v JobTaskEmailNotifications) []string { return v.OnSuccesses }).(pulumi.StringArrayOutput) @@ -23245,6 +23266,15 @@ func (o JobTaskEmailNotificationsPtrOutput) OnStarts() pulumi.StringArrayOutput }).(pulumi.StringArrayOutput) } +func (o JobTaskEmailNotificationsPtrOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v *JobTaskEmailNotifications) []string { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + // (List) list of emails to notify when the run completes successfully. func (o JobTaskEmailNotificationsPtrOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v *JobTaskEmailNotifications) []string { @@ -24611,7 +24641,8 @@ type JobTaskForEachTaskTaskEmailNotifications struct { // (List) list of emails to notify when the run fails. 
OnFailures []string `pulumi:"onFailures"` // (List) list of emails to notify when the run starts. - OnStarts []string `pulumi:"onStarts"` + OnStarts []string `pulumi:"onStarts"` + OnStreamingBacklogExceededs []string `pulumi:"onStreamingBacklogExceededs"` // (List) list of emails to notify when the run completes successfully. OnSuccesses []string `pulumi:"onSuccesses"` } @@ -24637,7 +24668,8 @@ type JobTaskForEachTaskTaskEmailNotificationsArgs struct { // (List) list of emails to notify when the run fails. OnFailures pulumi.StringArrayInput `pulumi:"onFailures"` // (List) list of emails to notify when the run starts. - OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs pulumi.StringArrayInput `pulumi:"onStreamingBacklogExceededs"` // (List) list of emails to notify when the run completes successfully. OnSuccesses pulumi.StringArrayInput `pulumi:"onSuccesses"` } @@ -24743,6 +24775,10 @@ func (o JobTaskForEachTaskTaskEmailNotificationsOutput) OnStarts() pulumi.String return o.ApplyT(func(v JobTaskForEachTaskTaskEmailNotifications) []string { return v.OnStarts }).(pulumi.StringArrayOutput) } +func (o JobTaskForEachTaskTaskEmailNotificationsOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v JobTaskForEachTaskTaskEmailNotifications) []string { return v.OnStreamingBacklogExceededs }).(pulumi.StringArrayOutput) +} + // (List) list of emails to notify when the run completes successfully. 
func (o JobTaskForEachTaskTaskEmailNotificationsOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v JobTaskForEachTaskTaskEmailNotifications) []string { return v.OnSuccesses }).(pulumi.StringArrayOutput) @@ -24814,6 +24850,15 @@ func (o JobTaskForEachTaskTaskEmailNotificationsPtrOutput) OnStarts() pulumi.Str }).(pulumi.StringArrayOutput) } +func (o JobTaskForEachTaskTaskEmailNotificationsPtrOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v *JobTaskForEachTaskTaskEmailNotifications) []string { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + // (List) list of emails to notify when the run completes successfully. func (o JobTaskForEachTaskTaskEmailNotificationsPtrOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v *JobTaskForEachTaskTaskEmailNotifications) []string { @@ -33221,7 +33266,8 @@ type JobTaskForEachTaskTaskWebhookNotifications struct { // (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified. OnFailures []JobTaskForEachTaskTaskWebhookNotificationsOnFailure `pulumi:"onFailures"` // (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. - OnStarts []JobTaskForEachTaskTaskWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStarts []JobTaskForEachTaskTaskWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStreamingBacklogExceededs []JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `pulumi:"onStreamingBacklogExceededs"` // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. OnSuccesses []JobTaskForEachTaskTaskWebhookNotificationsOnSuccess `pulumi:"onSuccesses"` } @@ -33247,7 +33293,8 @@ type JobTaskForEachTaskTaskWebhookNotificationsArgs struct { // (List) list of notification IDs to call when the run fails. 
A maximum of 3 destinations can be specified. OnFailures JobTaskForEachTaskTaskWebhookNotificationsOnFailureArrayInput `pulumi:"onFailures"` // (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. - OnStarts JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStarts JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput `pulumi:"onStreamingBacklogExceededs"` // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. OnSuccesses JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayInput `pulumi:"onSuccesses"` } @@ -33354,6 +33401,12 @@ func (o JobTaskForEachTaskTaskWebhookNotificationsOutput) OnStarts() JobTaskForE }).(JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput) } +func (o JobTaskForEachTaskTaskWebhookNotificationsOutput) OnStreamingBacklogExceededs() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v JobTaskForEachTaskTaskWebhookNotifications) []JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + return v.OnStreamingBacklogExceededs + }).(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
func (o JobTaskForEachTaskTaskWebhookNotificationsOutput) OnSuccesses() JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v JobTaskForEachTaskTaskWebhookNotifications) []JobTaskForEachTaskTaskWebhookNotificationsOnSuccess { @@ -33419,6 +33472,15 @@ func (o JobTaskForEachTaskTaskWebhookNotificationsPtrOutput) OnStarts() JobTaskF }).(JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput) } +func (o JobTaskForEachTaskTaskWebhookNotificationsPtrOutput) OnStreamingBacklogExceededs() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v *JobTaskForEachTaskTaskWebhookNotifications) []JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. func (o JobTaskForEachTaskTaskWebhookNotificationsPtrOutput) OnSuccesses() JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v *JobTaskForEachTaskTaskWebhookNotifications) []JobTaskForEachTaskTaskWebhookNotificationsOnSuccess { @@ -33722,6 +33784,103 @@ func (o JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput) Index(i pu }).(JobTaskForEachTaskTaskWebhookNotificationsOnStartOutput) } +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + // ID of the job + Id string `pulumi:"id"` +} + +// JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput is an input type that accepts JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs and JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput values. 
+// You can construct a concrete instance of `JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput` via: +// +// JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput interface { + pulumi.Input + + ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput + ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Context) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput +} + +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs struct { + // ID of the job + Id pulumi.StringInput `pulumi:"id"` +} + +func (JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return i.ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Background()) +} + +func (i JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + +// JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput is an input type that accepts JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray and 
JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput values. +// You can construct a concrete instance of `JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput` via: +// +// JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray{ JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} } +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput interface { + pulumi.Input + + ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput + ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Context) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput +} + +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray []JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput + +func (JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return i.ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Background()) +} + +func (i JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return pulumi.ToOutputWithContext(ctx, 
i).(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput struct{ *pulumi.OutputState } + +func (JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +func (o JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +// ID of the job +func (o JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded) string { return v.Id }).(pulumi.StringOutput) +} + +type JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput struct{ *pulumi.OutputState } + +func (JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) 
ToJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) Index(i pulumi.IntInput) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + return vs[0].([]JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)[vs[1].(int)] + }).(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + type JobTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { // ID of the job Id string `pulumi:"id"` @@ -42156,7 +42315,8 @@ type JobTaskWebhookNotifications struct { // (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified. OnFailures []JobTaskWebhookNotificationsOnFailure `pulumi:"onFailures"` // (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. - OnStarts []JobTaskWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStarts []JobTaskWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStreamingBacklogExceededs []JobTaskWebhookNotificationsOnStreamingBacklogExceeded `pulumi:"onStreamingBacklogExceededs"` // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. OnSuccesses []JobTaskWebhookNotificationsOnSuccess `pulumi:"onSuccesses"` } @@ -42182,7 +42342,8 @@ type JobTaskWebhookNotificationsArgs struct { // (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified. OnFailures JobTaskWebhookNotificationsOnFailureArrayInput `pulumi:"onFailures"` // (List) list of notification IDs to call when the run starts. 
A maximum of 3 destinations can be specified. - OnStarts JobTaskWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStarts JobTaskWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput `pulumi:"onStreamingBacklogExceededs"` // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. OnSuccesses JobTaskWebhookNotificationsOnSuccessArrayInput `pulumi:"onSuccesses"` } @@ -42285,6 +42446,12 @@ func (o JobTaskWebhookNotificationsOutput) OnStarts() JobTaskWebhookNotification return o.ApplyT(func(v JobTaskWebhookNotifications) []JobTaskWebhookNotificationsOnStart { return v.OnStarts }).(JobTaskWebhookNotificationsOnStartArrayOutput) } +func (o JobTaskWebhookNotificationsOutput) OnStreamingBacklogExceededs() JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v JobTaskWebhookNotifications) []JobTaskWebhookNotificationsOnStreamingBacklogExceeded { + return v.OnStreamingBacklogExceededs + }).(JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
func (o JobTaskWebhookNotificationsOutput) OnSuccesses() JobTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v JobTaskWebhookNotifications) []JobTaskWebhookNotificationsOnSuccess { return v.OnSuccesses }).(JobTaskWebhookNotificationsOnSuccessArrayOutput) @@ -42348,6 +42515,15 @@ func (o JobTaskWebhookNotificationsPtrOutput) OnStarts() JobTaskWebhookNotificat }).(JobTaskWebhookNotificationsOnStartArrayOutput) } +func (o JobTaskWebhookNotificationsPtrOutput) OnStreamingBacklogExceededs() JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v *JobTaskWebhookNotifications) []JobTaskWebhookNotificationsOnStreamingBacklogExceeded { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. func (o JobTaskWebhookNotificationsPtrOutput) OnSuccesses() JobTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v *JobTaskWebhookNotifications) []JobTaskWebhookNotificationsOnSuccess { @@ -42649,6 +42825,103 @@ func (o JobTaskWebhookNotificationsOnStartArrayOutput) Index(i pulumi.IntInput) }).(JobTaskWebhookNotificationsOnStartOutput) } +type JobTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + // ID of the job + Id string `pulumi:"id"` +} + +// JobTaskWebhookNotificationsOnStreamingBacklogExceededInput is an input type that accepts JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs and JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput values. 
+// You can construct a concrete instance of `JobTaskWebhookNotificationsOnStreamingBacklogExceededInput` via: +// +// JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} +type JobTaskWebhookNotificationsOnStreamingBacklogExceededInput interface { + pulumi.Input + + ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutput() JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput + ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Context) JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput +} + +type JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs struct { + // ID of the job + Id pulumi.StringInput `pulumi:"id"` +} + +func (JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutput() JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return i.ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Background()) +} + +func (i JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + +// JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput is an input type that accepts JobTaskWebhookNotificationsOnStreamingBacklogExceededArray and JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput values. 
+// You can construct a concrete instance of `JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput` via: +// +// JobTaskWebhookNotificationsOnStreamingBacklogExceededArray{ JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} } +type JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput interface { + pulumi.Input + + ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput + ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Context) JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput +} + +type JobTaskWebhookNotificationsOnStreamingBacklogExceededArray []JobTaskWebhookNotificationsOnStreamingBacklogExceededInput + +func (JobTaskWebhookNotificationsOnStreamingBacklogExceededArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i JobTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return i.ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Background()) +} + +func (i JobTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + +type JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput struct{ *pulumi.OutputState } + +func (JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o 
JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutput() JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +func (o JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +// ID of the job +func (o JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v JobTaskWebhookNotificationsOnStreamingBacklogExceeded) string { return v.Id }).(pulumi.StringOutput) +} + +type JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput struct{ *pulumi.OutputState } + +func (JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToJobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) Index(i pulumi.IntInput) JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) JobTaskWebhookNotificationsOnStreamingBacklogExceeded { + return vs[0].([]JobTaskWebhookNotificationsOnStreamingBacklogExceeded)[vs[1].(int)] + }).(JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + type JobTaskWebhookNotificationsOnSuccess struct { // ID of the job Id string 
`pulumi:"id"` @@ -42751,6 +43024,7 @@ type JobTrigger struct { FileArrival *JobTriggerFileArrival `pulumi:"fileArrival"` // Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. When the `pauseStatus` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pauseStatus`. PauseStatus *string `pulumi:"pauseStatus"` + Periodic *JobTriggerPeriodic `pulumi:"periodic"` Table *JobTriggerTable `pulumi:"table"` TableUpdate *JobTriggerTableUpdate `pulumi:"tableUpdate"` } @@ -42771,6 +43045,7 @@ type JobTriggerArgs struct { FileArrival JobTriggerFileArrivalPtrInput `pulumi:"fileArrival"` // Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. When the `pauseStatus` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pauseStatus`. PauseStatus pulumi.StringPtrInput `pulumi:"pauseStatus"` + Periodic JobTriggerPeriodicPtrInput `pulumi:"periodic"` Table JobTriggerTablePtrInput `pulumi:"table"` TableUpdate JobTriggerTableUpdatePtrInput `pulumi:"tableUpdate"` } @@ -42862,6 +43137,10 @@ func (o JobTriggerOutput) PauseStatus() pulumi.StringPtrOutput { return o.ApplyT(func(v JobTrigger) *string { return v.PauseStatus }).(pulumi.StringPtrOutput) } +func (o JobTriggerOutput) Periodic() JobTriggerPeriodicPtrOutput { + return o.ApplyT(func(v JobTrigger) *JobTriggerPeriodic { return v.Periodic }).(JobTriggerPeriodicPtrOutput) +} + func (o JobTriggerOutput) Table() JobTriggerTablePtrOutput { return o.ApplyT(func(v JobTrigger) *JobTriggerTable { return v.Table }).(JobTriggerTablePtrOutput) } @@ -42914,6 +43193,15 @@ func (o JobTriggerPtrOutput) PauseStatus() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +func (o JobTriggerPtrOutput) Periodic() JobTriggerPeriodicPtrOutput { + return o.ApplyT(func(v *JobTrigger) *JobTriggerPeriodic { + if v == nil { + return nil + } + return v.Periodic + }).(JobTriggerPeriodicPtrOutput) +} + func (o JobTriggerPtrOutput) Table() 
JobTriggerTablePtrOutput { return o.ApplyT(func(v *JobTrigger) *JobTriggerTable { if v == nil { @@ -43107,6 +43395,154 @@ func (o JobTriggerFileArrivalPtrOutput) WaitAfterLastChangeSeconds() pulumi.IntP }).(pulumi.IntPtrOutput) } +type JobTriggerPeriodic struct { + Interval int `pulumi:"interval"` + Unit string `pulumi:"unit"` +} + +// JobTriggerPeriodicInput is an input type that accepts JobTriggerPeriodicArgs and JobTriggerPeriodicOutput values. +// You can construct a concrete instance of `JobTriggerPeriodicInput` via: +// +// JobTriggerPeriodicArgs{...} +type JobTriggerPeriodicInput interface { + pulumi.Input + + ToJobTriggerPeriodicOutput() JobTriggerPeriodicOutput + ToJobTriggerPeriodicOutputWithContext(context.Context) JobTriggerPeriodicOutput +} + +type JobTriggerPeriodicArgs struct { + Interval pulumi.IntInput `pulumi:"interval"` + Unit pulumi.StringInput `pulumi:"unit"` +} + +func (JobTriggerPeriodicArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobTriggerPeriodic)(nil)).Elem() +} + +func (i JobTriggerPeriodicArgs) ToJobTriggerPeriodicOutput() JobTriggerPeriodicOutput { + return i.ToJobTriggerPeriodicOutputWithContext(context.Background()) +} + +func (i JobTriggerPeriodicArgs) ToJobTriggerPeriodicOutputWithContext(ctx context.Context) JobTriggerPeriodicOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTriggerPeriodicOutput) +} + +func (i JobTriggerPeriodicArgs) ToJobTriggerPeriodicPtrOutput() JobTriggerPeriodicPtrOutput { + return i.ToJobTriggerPeriodicPtrOutputWithContext(context.Background()) +} + +func (i JobTriggerPeriodicArgs) ToJobTriggerPeriodicPtrOutputWithContext(ctx context.Context) JobTriggerPeriodicPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTriggerPeriodicOutput).ToJobTriggerPeriodicPtrOutputWithContext(ctx) +} + +// JobTriggerPeriodicPtrInput is an input type that accepts JobTriggerPeriodicArgs, JobTriggerPeriodicPtr and JobTriggerPeriodicPtrOutput values. 
+// You can construct a concrete instance of `JobTriggerPeriodicPtrInput` via: +// +// JobTriggerPeriodicArgs{...} +// +// or: +// +// nil +type JobTriggerPeriodicPtrInput interface { + pulumi.Input + + ToJobTriggerPeriodicPtrOutput() JobTriggerPeriodicPtrOutput + ToJobTriggerPeriodicPtrOutputWithContext(context.Context) JobTriggerPeriodicPtrOutput +} + +type jobTriggerPeriodicPtrType JobTriggerPeriodicArgs + +func JobTriggerPeriodicPtr(v *JobTriggerPeriodicArgs) JobTriggerPeriodicPtrInput { + return (*jobTriggerPeriodicPtrType)(v) +} + +func (*jobTriggerPeriodicPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**JobTriggerPeriodic)(nil)).Elem() +} + +func (i *jobTriggerPeriodicPtrType) ToJobTriggerPeriodicPtrOutput() JobTriggerPeriodicPtrOutput { + return i.ToJobTriggerPeriodicPtrOutputWithContext(context.Background()) +} + +func (i *jobTriggerPeriodicPtrType) ToJobTriggerPeriodicPtrOutputWithContext(ctx context.Context) JobTriggerPeriodicPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTriggerPeriodicPtrOutput) +} + +type JobTriggerPeriodicOutput struct{ *pulumi.OutputState } + +func (JobTriggerPeriodicOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobTriggerPeriodic)(nil)).Elem() +} + +func (o JobTriggerPeriodicOutput) ToJobTriggerPeriodicOutput() JobTriggerPeriodicOutput { + return o +} + +func (o JobTriggerPeriodicOutput) ToJobTriggerPeriodicOutputWithContext(ctx context.Context) JobTriggerPeriodicOutput { + return o +} + +func (o JobTriggerPeriodicOutput) ToJobTriggerPeriodicPtrOutput() JobTriggerPeriodicPtrOutput { + return o.ToJobTriggerPeriodicPtrOutputWithContext(context.Background()) +} + +func (o JobTriggerPeriodicOutput) ToJobTriggerPeriodicPtrOutputWithContext(ctx context.Context) JobTriggerPeriodicPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v JobTriggerPeriodic) *JobTriggerPeriodic { + return &v + }).(JobTriggerPeriodicPtrOutput) +} + +func (o JobTriggerPeriodicOutput) Interval() 
pulumi.IntOutput { + return o.ApplyT(func(v JobTriggerPeriodic) int { return v.Interval }).(pulumi.IntOutput) +} + +func (o JobTriggerPeriodicOutput) Unit() pulumi.StringOutput { + return o.ApplyT(func(v JobTriggerPeriodic) string { return v.Unit }).(pulumi.StringOutput) +} + +type JobTriggerPeriodicPtrOutput struct{ *pulumi.OutputState } + +func (JobTriggerPeriodicPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**JobTriggerPeriodic)(nil)).Elem() +} + +func (o JobTriggerPeriodicPtrOutput) ToJobTriggerPeriodicPtrOutput() JobTriggerPeriodicPtrOutput { + return o +} + +func (o JobTriggerPeriodicPtrOutput) ToJobTriggerPeriodicPtrOutputWithContext(ctx context.Context) JobTriggerPeriodicPtrOutput { + return o +} + +func (o JobTriggerPeriodicPtrOutput) Elem() JobTriggerPeriodicOutput { + return o.ApplyT(func(v *JobTriggerPeriodic) JobTriggerPeriodic { + if v != nil { + return *v + } + var ret JobTriggerPeriodic + return ret + }).(JobTriggerPeriodicOutput) +} + +func (o JobTriggerPeriodicPtrOutput) Interval() pulumi.IntPtrOutput { + return o.ApplyT(func(v *JobTriggerPeriodic) *int { + if v == nil { + return nil + } + return &v.Interval + }).(pulumi.IntPtrOutput) +} + +func (o JobTriggerPeriodicPtrOutput) Unit() pulumi.StringPtrOutput { + return o.ApplyT(func(v *JobTriggerPeriodic) *string { + if v == nil { + return nil + } + return &v.Unit + }).(pulumi.StringPtrOutput) +} + type JobTriggerTable struct { Condition *string `pulumi:"condition"` MinTimeBetweenTriggersSeconds *int `pulumi:"minTimeBetweenTriggersSeconds"` @@ -43473,7 +43909,8 @@ type JobWebhookNotifications struct { // (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified. OnFailures []JobWebhookNotificationsOnFailure `pulumi:"onFailures"` // (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. 
- OnStarts []JobWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStarts []JobWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStreamingBacklogExceededs []JobWebhookNotificationsOnStreamingBacklogExceeded `pulumi:"onStreamingBacklogExceededs"` // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. OnSuccesses []JobWebhookNotificationsOnSuccess `pulumi:"onSuccesses"` } @@ -43499,7 +43936,8 @@ type JobWebhookNotificationsArgs struct { // (List) list of notification IDs to call when the run fails. A maximum of 3 destinations can be specified. OnFailures JobWebhookNotificationsOnFailureArrayInput `pulumi:"onFailures"` // (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. - OnStarts JobWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStarts JobWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs JobWebhookNotificationsOnStreamingBacklogExceededArrayInput `pulumi:"onStreamingBacklogExceededs"` // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. OnSuccesses JobWebhookNotificationsOnSuccessArrayInput `pulumi:"onSuccesses"` } @@ -43602,6 +44040,12 @@ func (o JobWebhookNotificationsOutput) OnStarts() JobWebhookNotificationsOnStart return o.ApplyT(func(v JobWebhookNotifications) []JobWebhookNotificationsOnStart { return v.OnStarts }).(JobWebhookNotificationsOnStartArrayOutput) } +func (o JobWebhookNotificationsOutput) OnStreamingBacklogExceededs() JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v JobWebhookNotifications) []JobWebhookNotificationsOnStreamingBacklogExceeded { + return v.OnStreamingBacklogExceededs + }).(JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + // (List) list of notification IDs to call when the run completes successfully. 
A maximum of 3 destinations can be specified. func (o JobWebhookNotificationsOutput) OnSuccesses() JobWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v JobWebhookNotifications) []JobWebhookNotificationsOnSuccess { return v.OnSuccesses }).(JobWebhookNotificationsOnSuccessArrayOutput) @@ -43665,6 +44109,15 @@ func (o JobWebhookNotificationsPtrOutput) OnStarts() JobWebhookNotificationsOnSt }).(JobWebhookNotificationsOnStartArrayOutput) } +func (o JobWebhookNotificationsPtrOutput) OnStreamingBacklogExceededs() JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v *JobWebhookNotifications) []JobWebhookNotificationsOnStreamingBacklogExceeded { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + // (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. func (o JobWebhookNotificationsPtrOutput) OnSuccesses() JobWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v *JobWebhookNotifications) []JobWebhookNotificationsOnSuccess { @@ -43966,6 +44419,103 @@ func (o JobWebhookNotificationsOnStartArrayOutput) Index(i pulumi.IntInput) JobW }).(JobWebhookNotificationsOnStartOutput) } +type JobWebhookNotificationsOnStreamingBacklogExceeded struct { + // ID of the job + Id string `pulumi:"id"` +} + +// JobWebhookNotificationsOnStreamingBacklogExceededInput is an input type that accepts JobWebhookNotificationsOnStreamingBacklogExceededArgs and JobWebhookNotificationsOnStreamingBacklogExceededOutput values. 
+// You can construct a concrete instance of `JobWebhookNotificationsOnStreamingBacklogExceededInput` via: +// +// JobWebhookNotificationsOnStreamingBacklogExceededArgs{...} +type JobWebhookNotificationsOnStreamingBacklogExceededInput interface { + pulumi.Input + + ToJobWebhookNotificationsOnStreamingBacklogExceededOutput() JobWebhookNotificationsOnStreamingBacklogExceededOutput + ToJobWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Context) JobWebhookNotificationsOnStreamingBacklogExceededOutput +} + +type JobWebhookNotificationsOnStreamingBacklogExceededArgs struct { + // ID of the job + Id pulumi.StringInput `pulumi:"id"` +} + +func (JobWebhookNotificationsOnStreamingBacklogExceededArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i JobWebhookNotificationsOnStreamingBacklogExceededArgs) ToJobWebhookNotificationsOnStreamingBacklogExceededOutput() JobWebhookNotificationsOnStreamingBacklogExceededOutput { + return i.ToJobWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Background()) +} + +func (i JobWebhookNotificationsOnStreamingBacklogExceededArgs) ToJobWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) JobWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobWebhookNotificationsOnStreamingBacklogExceededOutput) +} + +// JobWebhookNotificationsOnStreamingBacklogExceededArrayInput is an input type that accepts JobWebhookNotificationsOnStreamingBacklogExceededArray and JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput values. 
+// You can construct a concrete instance of `JobWebhookNotificationsOnStreamingBacklogExceededArrayInput` via: +// +// JobWebhookNotificationsOnStreamingBacklogExceededArray{ JobWebhookNotificationsOnStreamingBacklogExceededArgs{...} } +type JobWebhookNotificationsOnStreamingBacklogExceededArrayInput interface { + pulumi.Input + + ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput + ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Context) JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput +} + +type JobWebhookNotificationsOnStreamingBacklogExceededArray []JobWebhookNotificationsOnStreamingBacklogExceededInput + +func (JobWebhookNotificationsOnStreamingBacklogExceededArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i JobWebhookNotificationsOnStreamingBacklogExceededArray) ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return i.ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Background()) +} + +func (i JobWebhookNotificationsOnStreamingBacklogExceededArray) ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + +type JobWebhookNotificationsOnStreamingBacklogExceededOutput struct{ *pulumi.OutputState } + +func (JobWebhookNotificationsOnStreamingBacklogExceededOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o JobWebhookNotificationsOnStreamingBacklogExceededOutput) ToJobWebhookNotificationsOnStreamingBacklogExceededOutput() 
JobWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +func (o JobWebhookNotificationsOnStreamingBacklogExceededOutput) ToJobWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) JobWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +// ID of the job +func (o JobWebhookNotificationsOnStreamingBacklogExceededOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v JobWebhookNotificationsOnStreamingBacklogExceeded) string { return v.Id }).(pulumi.StringOutput) +} + +type JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput struct{ *pulumi.OutputState } + +func (JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutput() JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToJobWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput) Index(i pulumi.IntInput) JobWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) JobWebhookNotificationsOnStreamingBacklogExceeded { + return vs[0].([]JobWebhookNotificationsOnStreamingBacklogExceeded)[vs[1].(int)] + }).(JobWebhookNotificationsOnStreamingBacklogExceededOutput) +} + type JobWebhookNotificationsOnSuccess struct { // ID of the job Id string `pulumi:"id"` @@ -74466,6 +75016,7 @@ type GetExternalLocationExternalLocationInfo struct { CredentialName *string `pulumi:"credentialName"` // The options for Server-Side Encryption to be used by each Databricks s3 
client when connecting to S3 cloud storage (AWS). EncryptionDetails *GetExternalLocationExternalLocationInfoEncryptionDetails `pulumi:"encryptionDetails"` + IsolationMode *string `pulumi:"isolationMode"` // Unique identifier of the parent Metastore. MetastoreId *string `pulumi:"metastoreId"` // The name of the external location @@ -74509,6 +75060,7 @@ type GetExternalLocationExternalLocationInfoArgs struct { CredentialName pulumi.StringPtrInput `pulumi:"credentialName"` // The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). EncryptionDetails GetExternalLocationExternalLocationInfoEncryptionDetailsPtrInput `pulumi:"encryptionDetails"` + IsolationMode pulumi.StringPtrInput `pulumi:"isolationMode"` // Unique identifier of the parent Metastore. MetastoreId pulumi.StringPtrInput `pulumi:"metastoreId"` // The name of the external location @@ -74643,6 +75195,10 @@ func (o GetExternalLocationExternalLocationInfoOutput) EncryptionDetails() GetEx }).(GetExternalLocationExternalLocationInfoEncryptionDetailsPtrOutput) } +func (o GetExternalLocationExternalLocationInfoOutput) IsolationMode() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetExternalLocationExternalLocationInfo) *string { return v.IsolationMode }).(pulumi.StringPtrOutput) +} + // Unique identifier of the parent Metastore. 
func (o GetExternalLocationExternalLocationInfoOutput) MetastoreId() pulumi.StringPtrOutput { return o.ApplyT(func(v GetExternalLocationExternalLocationInfo) *string { return v.MetastoreId }).(pulumi.StringPtrOutput) @@ -74781,6 +75337,15 @@ func (o GetExternalLocationExternalLocationInfoPtrOutput) EncryptionDetails() Ge }).(GetExternalLocationExternalLocationInfoEncryptionDetailsPtrOutput) } +func (o GetExternalLocationExternalLocationInfoPtrOutput) IsolationMode() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetExternalLocationExternalLocationInfo) *string { + if v == nil { + return nil + } + return v.IsolationMode + }).(pulumi.StringPtrOutput) +} + // Unique identifier of the parent Metastore. func (o GetExternalLocationExternalLocationInfoPtrOutput) MetastoreId() pulumi.StringPtrOutput { return o.ApplyT(func(v *GetExternalLocationExternalLocationInfo) *string { @@ -78757,6 +79322,7 @@ type GetJobJobSettingsSettingsEmailNotifications struct { OnDurationWarningThresholdExceededs []string `pulumi:"onDurationWarningThresholdExceededs"` OnFailures []string `pulumi:"onFailures"` OnStarts []string `pulumi:"onStarts"` + OnStreamingBacklogExceededs []string `pulumi:"onStreamingBacklogExceededs"` OnSuccesses []string `pulumi:"onSuccesses"` } @@ -78776,6 +79342,7 @@ type GetJobJobSettingsSettingsEmailNotificationsArgs struct { OnDurationWarningThresholdExceededs pulumi.StringArrayInput `pulumi:"onDurationWarningThresholdExceededs"` OnFailures pulumi.StringArrayInput `pulumi:"onFailures"` OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs pulumi.StringArrayInput `pulumi:"onStreamingBacklogExceededs"` OnSuccesses pulumi.StringArrayInput `pulumi:"onSuccesses"` } @@ -78874,6 +79441,10 @@ func (o GetJobJobSettingsSettingsEmailNotificationsOutput) OnStarts() pulumi.Str return o.ApplyT(func(v GetJobJobSettingsSettingsEmailNotifications) []string { return v.OnStarts }).(pulumi.StringArrayOutput) } +func (o 
GetJobJobSettingsSettingsEmailNotificationsOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsEmailNotifications) []string { return v.OnStreamingBacklogExceededs }).(pulumi.StringArrayOutput) +} + func (o GetJobJobSettingsSettingsEmailNotificationsOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsEmailNotifications) []string { return v.OnSuccesses }).(pulumi.StringArrayOutput) } @@ -78938,6 +79509,15 @@ func (o GetJobJobSettingsSettingsEmailNotificationsPtrOutput) OnStarts() pulumi. }).(pulumi.StringArrayOutput) } +func (o GetJobJobSettingsSettingsEmailNotificationsPtrOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsEmailNotifications) []string { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + func (o GetJobJobSettingsSettingsEmailNotificationsPtrOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsEmailNotifications) []string { if v == nil { @@ -82435,628 +83015,6 @@ func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFilePtrOutput) De }).(pulumi.StringPtrOutput) } -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs struct { - Destination string `pulumi:"destination"` -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput values. 
-// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{...} -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs struct { - Destination pulumi.StringInput `pulumi:"destination"` -} - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Background()) -} - -func (i 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx) -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput values. -// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{...} -// -// or: -// -// nil -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput -} - -type getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs - -func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput { - return (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType)(v) -} - -func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType) ElementType() reflect.Type { - return 
reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Background()) -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Background()) -} - -func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs { - return &v - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) Destination() pulumi.StringOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) string { return v.Destination }).(pulumi.StringOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs { - if v != nil { - return *v - } - var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs - return ret 
- }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) Destination() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) *string { - if v == nil { - return nil - } - return &v.Destination - }).(pulumi.StringPtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 struct { - CannedAcl *string `pulumi:"cannedAcl"` - Destination string `pulumi:"destination"` - EnableEncryption *bool `pulumi:"enableEncryption"` - EncryptionType *string `pulumi:"encryptionType"` - Endpoint *string `pulumi:"endpoint"` - KmsKey *string `pulumi:"kmsKey"` - Region *string `pulumi:"region"` -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output values. -// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{...} -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args struct { - CannedAcl pulumi.StringPtrInput `pulumi:"cannedAcl"` - Destination pulumi.StringInput `pulumi:"destination"` - EnableEncryption pulumi.BoolPtrInput `pulumi:"enableEncryption"` - EncryptionType pulumi.StringPtrInput `pulumi:"encryptionType"` - Endpoint pulumi.StringPtrInput `pulumi:"endpoint"` - KmsKey pulumi.StringPtrInput 
`pulumi:"kmsKey"` - Region pulumi.StringPtrInput `pulumi:"region"` -} - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx) -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Ptr and 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput values. -// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{...} -// -// or: -// -// nil -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput -} - -type getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args - -func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Ptr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput { - return (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType)(v) -} - -func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Background()) -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return pulumi.ToOutputWithContext(ctx, 
i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Background()) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 { - return &v - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) CannedAcl() pulumi.StringPtrOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.CannedAcl }).(pulumi.StringPtrOutput) -} - -func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) Destination() pulumi.StringOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) string { return v.Destination }).(pulumi.StringOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) EnableEncryption() pulumi.BoolPtrOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *bool { return v.EnableEncryption }).(pulumi.BoolPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) EncryptionType() pulumi.StringPtrOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.EncryptionType }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) Endpoint() pulumi.StringPtrOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.Endpoint }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) KmsKey() pulumi.StringPtrOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.KmsKey }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) Region() pulumi.StringPtrOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.Region }).(pulumi.StringPtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 { - if v != nil { - return *v - } - var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 - return ret - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) CannedAcl() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { - if v == nil { - return nil - } - return v.CannedAcl - }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Destination() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { - if v == nil { - return nil - } - return &v.Destination - }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) EnableEncryption() pulumi.BoolPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *bool { - if v == nil { - return nil - } - return v.EnableEncryption - }).(pulumi.BoolPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) EncryptionType() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { - if v == nil { - return nil - } - return 
v.EncryptionType - }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Endpoint() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { - if v == nil { - return nil - } - return v.Endpoint - }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) KmsKey() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { - if v == nil { - return nil - } - return v.KmsKey - }).(pulumi.StringPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Region() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { - if v == nil { - return nil - } - return v.Region - }).(pulumi.StringPtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes struct { - Destination string `pulumi:"destination"` -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput values. 
-// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{...} -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs struct { - Destination pulumi.StringInput `pulumi:"destination"` -} - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return 
i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx) -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput values. -// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{...} -// -// or: -// -// nil -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput -} - -type getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs - -func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput { - return 
(*getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType)(v) -} - -func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Background()) -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) 
ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Background()) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes { - return &v - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) Destination() pulumi.StringOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) string { return v.Destination }).(pulumi.StringOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { - return o -} - -func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes { - if v != nil { - return *v - } - var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes - return ret - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) Destination() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) *string { - if v == nil { - return nil - } - return &v.Destination - }).(pulumi.StringPtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace struct { - Destination string `pulumi:"destination"` -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput values. 
-// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{...} -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs struct { - Destination pulumi.StringInput `pulumi:"destination"` -} - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return 
i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Background()) -} - -func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx) -} - -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput values. -// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput` via: -// -// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{...} -// -// or: -// -// nil -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput interface { - pulumi.Input - - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput - ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput -} - -type getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs - -func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput { - return 
(*getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType)(v) -} - -func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Background()) -} - -func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ElementType() reflect.Type { - return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) 
ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Background()) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace { - return &v - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) Destination() pulumi.StringOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) string { return v.Destination }).(pulumi.StringOutput) -} - -type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput struct{ *pulumi.OutputState } - -func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return o -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { - return o -} - -func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace { - if v != nil { - return *v - } - var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace - return ret - }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) -} - -func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) Destination() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) *string { - if v == nil { - return nil - } - return &v.Destination - }).(pulumi.StringPtrOutput) -} - func init() { pulumi.RegisterInputType(reflect.TypeOf((*AccessControlRuleSetGrantRuleInput)(nil)).Elem(), AccessControlRuleSetGrantRuleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*AccessControlRuleSetGrantRuleArrayInput)(nil)).Elem(), AccessControlRuleSetGrantRuleArray{}) @@ -83453,6 +83411,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnFailureArrayInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnFailureArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnStartInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnStartArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnSuccessInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayInput)(nil)).Elem(), JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskHealthInput)(nil)).Elem(), JobTaskHealthArgs{}) @@ -83560,12 +83520,16 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnFailureArrayInput)(nil)).Elem(), JobTaskWebhookNotificationsOnFailureArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnStartInput)(nil)).Elem(), JobTaskWebhookNotificationsOnStartArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnStartArrayInput)(nil)).Elem(), JobTaskWebhookNotificationsOnStartArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnStreamingBacklogExceededInput)(nil)).Elem(), JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput)(nil)).Elem(), JobTaskWebhookNotificationsOnStreamingBacklogExceededArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnSuccessInput)(nil)).Elem(), JobTaskWebhookNotificationsOnSuccessArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskWebhookNotificationsOnSuccessArrayInput)(nil)).Elem(), JobTaskWebhookNotificationsOnSuccessArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerInput)(nil)).Elem(), JobTriggerArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerPtrInput)(nil)).Elem(), JobTriggerArgs{}) 
pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerFileArrivalInput)(nil)).Elem(), JobTriggerFileArrivalArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerFileArrivalPtrInput)(nil)).Elem(), JobTriggerFileArrivalArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerPeriodicInput)(nil)).Elem(), JobTriggerPeriodicArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerPeriodicPtrInput)(nil)).Elem(), JobTriggerPeriodicArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerTableInput)(nil)).Elem(), JobTriggerTableArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerTablePtrInput)(nil)).Elem(), JobTriggerTableArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTriggerTableUpdateInput)(nil)).Elem(), JobTriggerTableUpdateArgs{}) @@ -83578,6 +83542,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnFailureArrayInput)(nil)).Elem(), JobWebhookNotificationsOnFailureArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnStartInput)(nil)).Elem(), JobWebhookNotificationsOnStartArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnStartArrayInput)(nil)).Elem(), JobWebhookNotificationsOnStartArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnStreamingBacklogExceededInput)(nil)).Elem(), JobWebhookNotificationsOnStreamingBacklogExceededArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnStreamingBacklogExceededArrayInput)(nil)).Elem(), JobWebhookNotificationsOnStreamingBacklogExceededArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnSuccessInput)(nil)).Elem(), JobWebhookNotificationsOnSuccessArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobWebhookNotificationsOnSuccessArrayInput)(nil)).Elem(), JobWebhookNotificationsOnSuccessArray{}) pulumi.RegisterInputType(reflect.TypeOf((*LakehouseMonitorCustomMetricInput)(nil)).Elem(), LakehouseMonitorCustomMetricArgs{}) @@ -84042,14 +84008,6 @@ func 
init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptDbfsPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptDbfsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFileInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFileArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFilePtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFileArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{}) - 
pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{}) pulumi.RegisterOutputType(AccessControlRuleSetGrantRuleOutput{}) pulumi.RegisterOutputType(AccessControlRuleSetGrantRuleArrayOutput{}) pulumi.RegisterOutputType(ArtifactAllowlistArtifactMatcherOutput{}) @@ -84445,6 +84403,8 @@ func init() { pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnFailureArrayOutput{}) pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnStartOutput{}) pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput{}) + pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput{}) + pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput{}) pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnSuccessOutput{}) pulumi.RegisterOutputType(JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayOutput{}) pulumi.RegisterOutputType(JobTaskHealthOutput{}) @@ -84552,12 +84512,16 @@ func init() { pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnFailureArrayOutput{}) pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnStartOutput{}) pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnStartArrayOutput{}) + pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnStreamingBacklogExceededOutput{}) + pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput{}) pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnSuccessOutput{}) pulumi.RegisterOutputType(JobTaskWebhookNotificationsOnSuccessArrayOutput{}) pulumi.RegisterOutputType(JobTriggerOutput{}) pulumi.RegisterOutputType(JobTriggerPtrOutput{}) pulumi.RegisterOutputType(JobTriggerFileArrivalOutput{}) pulumi.RegisterOutputType(JobTriggerFileArrivalPtrOutput{}) + 
pulumi.RegisterOutputType(JobTriggerPeriodicOutput{}) + pulumi.RegisterOutputType(JobTriggerPeriodicPtrOutput{}) pulumi.RegisterOutputType(JobTriggerTableOutput{}) pulumi.RegisterOutputType(JobTriggerTablePtrOutput{}) pulumi.RegisterOutputType(JobTriggerTableUpdateOutput{}) @@ -84570,6 +84534,8 @@ func init() { pulumi.RegisterOutputType(JobWebhookNotificationsOnFailureArrayOutput{}) pulumi.RegisterOutputType(JobWebhookNotificationsOnStartOutput{}) pulumi.RegisterOutputType(JobWebhookNotificationsOnStartArrayOutput{}) + pulumi.RegisterOutputType(JobWebhookNotificationsOnStreamingBacklogExceededOutput{}) + pulumi.RegisterOutputType(JobWebhookNotificationsOnStreamingBacklogExceededArrayOutput{}) pulumi.RegisterOutputType(JobWebhookNotificationsOnSuccessOutput{}) pulumi.RegisterOutputType(JobWebhookNotificationsOnSuccessArrayOutput{}) pulumi.RegisterOutputType(LakehouseMonitorCustomMetricOutput{}) @@ -85034,12 +85000,4 @@ func init() { pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptDbfsPtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFileOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFilePtrOutput{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput{}) - pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput{}) - 
pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput{}) } diff --git a/sdk/go/databricks/pulumiTypes1.go b/sdk/go/databricks/pulumiTypes1.go index 6e03947b..be8fc992 100644 --- a/sdk/go/databricks/pulumiTypes1.go +++ b/sdk/go/databricks/pulumiTypes1.go @@ -13,6 +13,628 @@ import ( var _ = internal.GetEnvOrDefault +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs struct { + Destination string `pulumi:"destination"` +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{...} +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs struct { + Destination pulumi.StringInput `pulumi:"destination"` +} + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(context.Background()) +} + +func (i 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx) +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{...} +// +// or: +// +// nil +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput +} + +type getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs + +func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput { + return (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType)(v) +} + +func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Background()) +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) +} + +type 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(context.Background()) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs { + return &v + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) Destination() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) string { return v.Destination }).(pulumi.StringOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput struct{ 
*pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs { + if v != nil { + return *v + } + var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs + return ret + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput) Destination() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs) *string { + if v == nil { + return nil + } + return &v.Destination + }).(pulumi.StringPtrOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 struct { + CannedAcl *string `pulumi:"cannedAcl"` + Destination string `pulumi:"destination"` + EnableEncryption *bool `pulumi:"enableEncryption"` + EncryptionType *string `pulumi:"encryptionType"` + Endpoint *string `pulumi:"endpoint"` + KmsKey *string `pulumi:"kmsKey"` + Region *string `pulumi:"region"` +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input is an input type that 
accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{...} +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args struct { + CannedAcl pulumi.StringPtrInput `pulumi:"cannedAcl"` + Destination pulumi.StringInput `pulumi:"destination"` + EnableEncryption pulumi.BoolPtrInput `pulumi:"enableEncryption"` + EncryptionType pulumi.StringPtrInput `pulumi:"encryptionType"` + Endpoint pulumi.StringPtrInput `pulumi:"endpoint"` + KmsKey pulumi.StringPtrInput `pulumi:"kmsKey"` + Region pulumi.StringPtrInput `pulumi:"region"` +} + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { + return 
pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx) +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Ptr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{...} +// +// or: +// +// nil +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput +} + +type getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args + +func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Ptr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput { + return (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType)(v) +} + +func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Background()) +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) +} + +type 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3OutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(context.Background()) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 { + return &v + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) CannedAcl() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.CannedAcl }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) Destination() pulumi.StringOutput { + 
return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) string { return v.Destination }).(pulumi.StringOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) EnableEncryption() pulumi.BoolPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *bool { return v.EnableEncryption }).(pulumi.BoolPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) EncryptionType() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.EncryptionType }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) Endpoint() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.Endpoint }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) KmsKey() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.KmsKey }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) Region() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { return v.Region }).(pulumi.StringPtrOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return o +} + +func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 { + if v != nil { + return *v + } + var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 + return ret + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) CannedAcl() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { + if v == nil { + return nil + } + return v.CannedAcl + }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Destination() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { + if v == nil { + return nil + } + return &v.Destination + }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) EnableEncryption() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *bool { + if v == nil { + return nil + } + return v.EnableEncryption + }).(pulumi.BoolPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) EncryptionType() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { + if v == nil { + return nil + } + return v.EncryptionType + }).(pulumi.StringPtrOutput) +} + +func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Endpoint() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { + if v == nil { + return nil + } + return v.Endpoint + }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) KmsKey() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { + if v == nil { + return nil + } + return v.KmsKey + }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput) Region() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3) *string { + if v == nil { + return nil + } + return v.Region + }).(pulumi.StringPtrOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes struct { + Destination string `pulumi:"destination"` +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{...} +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs struct { + Destination pulumi.StringInput `pulumi:"destination"` +} + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return 
i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx) +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{...} +// +// or: +// +// nil +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput +} + +type getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs + +func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput { + return 
(*getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType)(v) +} + +func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Background()) +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) 
ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(context.Background()) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes { + return &v + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) Destination() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) string { return v.Destination }).(pulumi.StringOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput { + return o +} + +func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes { + if v != nil { + return *v + } + var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes + return ret + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput) Destination() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumes) *string { + if v == nil { + return nil + } + return &v.Destination + }).(pulumi.StringPtrOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace struct { + Destination string `pulumi:"destination"` +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{...} +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs struct { + Destination pulumi.StringInput `pulumi:"destination"` +} + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return 
i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput).ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx) +} + +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput is an input type that accepts GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs, GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtr and GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput` via: +// +// GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{...} +// +// or: +// +// nil +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput + ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput +} + +type getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs + +func GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtr(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput { + return 
(*getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType)(v) +} + +func (*getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return i.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Background()) +} + +func (i *getJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrType) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) 
ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return o.ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(context.Background()) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace { + return &v + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) Destination() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) string { return v.Destination }).(pulumi.StringOutput) +} + +type GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) ToGetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput { + return o +} + +func (o 
GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) Elem() GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace { + if v != nil { + return *v + } + var ret GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace + return ret + }).(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput) +} + +func (o GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput) Destination() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspace) *string { + if v == nil { + return nil + } + return &v.Destination + }).(pulumi.StringPtrOutput) +} + type GetJobJobSettingsSettingsJobClusterNewClusterWorkloadType struct { Clients GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClients `pulumi:"clients"` } @@ -7222,6 +7844,7 @@ type GetJobJobSettingsSettingsTaskEmailNotifications struct { OnDurationWarningThresholdExceededs []string `pulumi:"onDurationWarningThresholdExceededs"` OnFailures []string `pulumi:"onFailures"` OnStarts []string `pulumi:"onStarts"` + OnStreamingBacklogExceededs []string `pulumi:"onStreamingBacklogExceededs"` OnSuccesses []string `pulumi:"onSuccesses"` } @@ -7241,6 +7864,7 @@ type GetJobJobSettingsSettingsTaskEmailNotificationsArgs struct { OnDurationWarningThresholdExceededs pulumi.StringArrayInput `pulumi:"onDurationWarningThresholdExceededs"` OnFailures pulumi.StringArrayInput `pulumi:"onFailures"` OnStarts pulumi.StringArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs pulumi.StringArrayInput `pulumi:"onStreamingBacklogExceededs"` OnSuccesses pulumi.StringArrayInput `pulumi:"onSuccesses"` } @@ -7339,6 +7963,10 @@ func (o GetJobJobSettingsSettingsTaskEmailNotificationsOutput) OnStarts() pulumi return o.ApplyT(func(v 
GetJobJobSettingsSettingsTaskEmailNotifications) []string { return v.OnStarts }).(pulumi.StringArrayOutput) } +func (o GetJobJobSettingsSettingsTaskEmailNotificationsOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskEmailNotifications) []string { return v.OnStreamingBacklogExceededs }).(pulumi.StringArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskEmailNotificationsOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTaskEmailNotifications) []string { return v.OnSuccesses }).(pulumi.StringArrayOutput) } @@ -7403,6 +8031,15 @@ func (o GetJobJobSettingsSettingsTaskEmailNotificationsPtrOutput) OnStarts() pul }).(pulumi.StringArrayOutput) } +func (o GetJobJobSettingsSettingsTaskEmailNotificationsPtrOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskEmailNotifications) []string { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskEmailNotificationsPtrOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskEmailNotifications) []string { if v == nil { @@ -8625,6 +9262,7 @@ type GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struct { OnDurationWarningThresholdExceededs []string `pulumi:"onDurationWarningThresholdExceededs"` OnFailures []string `pulumi:"onFailures"` OnStarts []string `pulumi:"onStarts"` + OnStreamingBacklogExceededs []string `pulumi:"onStreamingBacklogExceededs"` OnSuccesses []string `pulumi:"onSuccesses"` } @@ -8644,6 +9282,7 @@ type GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs struct { OnDurationWarningThresholdExceededs pulumi.StringArrayInput `pulumi:"onDurationWarningThresholdExceededs"` OnFailures pulumi.StringArrayInput `pulumi:"onFailures"` OnStarts pulumi.StringArrayInput 
`pulumi:"onStarts"` + OnStreamingBacklogExceededs pulumi.StringArrayInput `pulumi:"onStreamingBacklogExceededs"` OnSuccesses pulumi.StringArrayInput `pulumi:"onSuccesses"` } @@ -8744,6 +9383,12 @@ func (o GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsOutput) On return o.ApplyT(func(v GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications) []string { return v.OnStarts }).(pulumi.StringArrayOutput) } +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications) []string { + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications) []string { return v.OnSuccesses }).(pulumi.StringArrayOutput) } @@ -8808,6 +9453,15 @@ func (o GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsPtrOutput) }).(pulumi.StringArrayOutput) } +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsPtrOutput) OnStreamingBacklogExceededs() pulumi.StringArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications) []string { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(pulumi.StringArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsPtrOutput) OnSuccesses() pulumi.StringArrayOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications) []string { if v == nil { @@ -15845,6 +16499,7 @@ type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications struct { OnDurationWarningThresholdExceededs []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded 
`pulumi:"onDurationWarningThresholdExceededs"` OnFailures []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure `pulumi:"onFailures"` OnStarts []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStreamingBacklogExceededs []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `pulumi:"onStreamingBacklogExceededs"` OnSuccesses []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess `pulumi:"onSuccesses"` } @@ -15863,6 +16518,7 @@ type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs struct OnDurationWarningThresholdExceededs GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArrayInput `pulumi:"onDurationWarningThresholdExceededs"` OnFailures GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArrayInput `pulumi:"onFailures"` OnStarts GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput `pulumi:"onStreamingBacklogExceededs"` OnSuccesses GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayInput `pulumi:"onSuccesses"` } @@ -15961,6 +16617,12 @@ func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOutput) }).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput) } +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOutput) OnStreamingBacklogExceededs() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + return v.OnStreamingBacklogExceededs + 
}).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOutput) OnSuccesses() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess { return v.OnSuccesses @@ -16018,6 +16680,15 @@ func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsPtrOutpu }).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput) } +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsPtrOutput) OnStreamingBacklogExceededs() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsPtrOutput) OnSuccesses() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess { if v == nil { @@ -16320,6 +16991,105 @@ func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartA }).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartOutput) } +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + // the id of Job if the resource was matched by name. 
+ Id string `pulumi:"id"` +} + +// GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput is an input type that accepts GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs and GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput` via: +// +// GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput + ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Context) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput +} + +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs struct { + // the id of Job if the resource was matched by name. 
+ Id pulumi.StringInput `pulumi:"id"` +} + +func (GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return i.ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + +// GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput is an input type that accepts GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray and GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput` via: +// +// GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray{ GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} } +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput + ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Context) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput +} + +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray []GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput + +func (GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return i.ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray) 
ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +// the id of Job if the resource was matched by name. 
+func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded) string { + return v.Id + }).(pulumi.StringOutput) +} + +type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToGetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) Index(i pulumi.IntInput) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + return vs[0].([]GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded)[vs[1].(int)] + 
}).(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + type GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { // the id of Job if the resource was matched by name. Id string `pulumi:"id"` @@ -23329,6 +24099,7 @@ type GetJobJobSettingsSettingsTaskWebhookNotifications struct { OnDurationWarningThresholdExceededs []GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded `pulumi:"onDurationWarningThresholdExceededs"` OnFailures []GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailure `pulumi:"onFailures"` OnStarts []GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStreamingBacklogExceededs []GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded `pulumi:"onStreamingBacklogExceededs"` OnSuccesses []GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess `pulumi:"onSuccesses"` } @@ -23347,6 +24118,7 @@ type GetJobJobSettingsSettingsTaskWebhookNotificationsArgs struct { OnDurationWarningThresholdExceededs GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceededArrayInput `pulumi:"onDurationWarningThresholdExceededs"` OnFailures GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArrayInput `pulumi:"onFailures"` OnStarts GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput `pulumi:"onStreamingBacklogExceededs"` OnSuccesses GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArrayInput `pulumi:"onSuccesses"` } @@ -23445,6 +24217,12 @@ func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOutput) OnStarts() GetJ }).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArrayOutput) } +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOutput) OnStreamingBacklogExceededs() 
GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded { + return v.OnStreamingBacklogExceededs + }).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOutput) OnSuccesses() GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess { return v.OnSuccesses @@ -23502,6 +24280,15 @@ func (o GetJobJobSettingsSettingsTaskWebhookNotificationsPtrOutput) OnStarts() G }).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArrayOutput) } +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsPtrOutput) OnStreamingBacklogExceededs() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + func (o GetJobJobSettingsSettingsTaskWebhookNotificationsPtrOutput) OnSuccesses() GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskWebhookNotifications) []GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess { if v == nil { @@ -23804,6 +24591,105 @@ func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArrayOutput) Ind }).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartOutput) } +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + // the id of Job if the resource 
was matched by name. + Id string `pulumi:"id"` +} + +// GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInput is an input type that accepts GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs and GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInput` via: +// +// GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput + ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Context) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput +} + +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs struct { + // the id of Job if the resource was matched by name. 
+ Id pulumi.StringInput `pulumi:"id"` +} + +func (GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return i.ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + +// GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput is an input type that accepts GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray and GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput` via: +// +// GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray{ GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs{...} } +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput + ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Context) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput +} + +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray []GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInput + +func (GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return i.ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return pulumi.ToOutputWithContext(ctx, 
i).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +// the id of Job if the resource was matched by name. 
+func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded) string { + return v.Id + }).(pulumi.StringOutput) +} + +type GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToGetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput) Index(i pulumi.IntInput) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded { + return vs[0].([]GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded)[vs[1].(int)] + }).(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput) +} + type GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess struct { // the id of Job if the resource was matched by name. 
Id string `pulumi:"id"` @@ -24413,6 +25299,7 @@ type GetJobJobSettingsSettingsWebhookNotifications struct { OnDurationWarningThresholdExceededs []GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded `pulumi:"onDurationWarningThresholdExceededs"` OnFailures []GetJobJobSettingsSettingsWebhookNotificationsOnFailure `pulumi:"onFailures"` OnStarts []GetJobJobSettingsSettingsWebhookNotificationsOnStart `pulumi:"onStarts"` + OnStreamingBacklogExceededs []GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded `pulumi:"onStreamingBacklogExceededs"` OnSuccesses []GetJobJobSettingsSettingsWebhookNotificationsOnSuccess `pulumi:"onSuccesses"` } @@ -24431,6 +25318,7 @@ type GetJobJobSettingsSettingsWebhookNotificationsArgs struct { OnDurationWarningThresholdExceededs GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceededArrayInput `pulumi:"onDurationWarningThresholdExceededs"` OnFailures GetJobJobSettingsSettingsWebhookNotificationsOnFailureArrayInput `pulumi:"onFailures"` OnStarts GetJobJobSettingsSettingsWebhookNotificationsOnStartArrayInput `pulumi:"onStarts"` + OnStreamingBacklogExceededs GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayInput `pulumi:"onStreamingBacklogExceededs"` OnSuccesses GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArrayInput `pulumi:"onSuccesses"` } @@ -24529,6 +25417,12 @@ func (o GetJobJobSettingsSettingsWebhookNotificationsOutput) OnStarts() GetJobJo }).(GetJobJobSettingsSettingsWebhookNotificationsOnStartArrayOutput) } +func (o GetJobJobSettingsSettingsWebhookNotificationsOutput) OnStreamingBacklogExceededs() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsWebhookNotifications) []GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded { + return v.OnStreamingBacklogExceededs + 
}).(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + func (o GetJobJobSettingsSettingsWebhookNotificationsOutput) OnSuccesses() GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsWebhookNotifications) []GetJobJobSettingsSettingsWebhookNotificationsOnSuccess { return v.OnSuccesses @@ -24586,6 +25480,15 @@ func (o GetJobJobSettingsSettingsWebhookNotificationsPtrOutput) OnStarts() GetJo }).(GetJobJobSettingsSettingsWebhookNotificationsOnStartArrayOutput) } +func (o GetJobJobSettingsSettingsWebhookNotificationsPtrOutput) OnStreamingBacklogExceededs() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsWebhookNotifications) []GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded { + if v == nil { + return nil + } + return v.OnStreamingBacklogExceededs + }).(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + func (o GetJobJobSettingsSettingsWebhookNotificationsPtrOutput) OnSuccesses() GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArrayOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsWebhookNotifications) []GetJobJobSettingsSettingsWebhookNotificationsOnSuccess { if v == nil { @@ -24888,6 +25791,103 @@ func (o GetJobJobSettingsSettingsWebhookNotificationsOnStartArrayOutput) Index(i }).(GetJobJobSettingsSettingsWebhookNotificationsOnStartOutput) } +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded struct { + // the id of Job if the resource was matched by name. + Id string `pulumi:"id"` +} + +// GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInput is an input type that accepts GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs and GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInput` via: +// +// GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs{...} +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput + ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Context) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput +} + +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs struct { + // the id of Job if the resource was matched by name. + Id pulumi.StringInput `pulumi:"id"` +} + +func (GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput { + return i.ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput) +} + +// GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayInput is an input type that 
accepts GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray and GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayInput` via: +// +// GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray{ GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs{...} } +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput + ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Context) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput +} + +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray []GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInput + +func (GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return i.ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) 
GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) +} + +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +func (o GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput { + return o +} + +// the id of Job if the resource was matched by name. 
+func (o GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded) string { return v.Id }).(pulumi.StringOutput) +} + +type GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput() GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) ToGetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput) Index(i pulumi.IntInput) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded { + return vs[0].([]GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded)[vs[1].(int)] + }).(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput) +} + type GetJobJobSettingsSettingsWebhookNotificationsOnSuccess struct { // the id of Job if the resource was matched by name. 
Id string `pulumi:"id"` @@ -27293,7 +28293,8 @@ type GetStorageCredentialStorageCredentialInfo struct { // credential details for GCP: DatabricksGcpServiceAccount *GetStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccount `pulumi:"databricksGcpServiceAccount"` // Unique ID of storage credential. - Id *string `pulumi:"id"` + Id *string `pulumi:"id"` + IsolationMode *string `pulumi:"isolationMode"` // Unique identifier of the parent Metastore. MetastoreId *string `pulumi:"metastoreId"` // The name of the storage credential @@ -27336,7 +28337,8 @@ type GetStorageCredentialStorageCredentialInfoArgs struct { // credential details for GCP: DatabricksGcpServiceAccount GetStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccountPtrInput `pulumi:"databricksGcpServiceAccount"` // Unique ID of storage credential. - Id pulumi.StringPtrInput `pulumi:"id"` + Id pulumi.StringPtrInput `pulumi:"id"` + IsolationMode pulumi.StringPtrInput `pulumi:"isolationMode"` // Unique identifier of the parent Metastore. MetastoreId pulumi.StringPtrInput `pulumi:"metastoreId"` // The name of the storage credential @@ -27482,6 +28484,10 @@ func (o GetStorageCredentialStorageCredentialInfoOutput) Id() pulumi.StringPtrOu return o.ApplyT(func(v GetStorageCredentialStorageCredentialInfo) *string { return v.Id }).(pulumi.StringPtrOutput) } +func (o GetStorageCredentialStorageCredentialInfoOutput) IsolationMode() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetStorageCredentialStorageCredentialInfo) *string { return v.IsolationMode }).(pulumi.StringPtrOutput) +} + // Unique identifier of the parent Metastore. 
func (o GetStorageCredentialStorageCredentialInfoOutput) MetastoreId() pulumi.StringPtrOutput { return o.ApplyT(func(v GetStorageCredentialStorageCredentialInfo) *string { return v.MetastoreId }).(pulumi.StringPtrOutput) @@ -27628,6 +28634,15 @@ func (o GetStorageCredentialStorageCredentialInfoPtrOutput) Id() pulumi.StringPt }).(pulumi.StringPtrOutput) } +func (o GetStorageCredentialStorageCredentialInfoPtrOutput) IsolationMode() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetStorageCredentialStorageCredentialInfo) *string { + if v == nil { + return nil + } + return v.IsolationMode + }).(pulumi.StringPtrOutput) +} + // Unique identifier of the parent Metastore. func (o GetStorageCredentialStorageCredentialInfoPtrOutput) MetastoreId() pulumi.StringPtrOutput { return o.ApplyT(func(v *GetStorageCredentialStorageCredentialInfo) *string { @@ -31309,6 +32324,14 @@ func (o GetTableTableInfoViewDependenciesDependencyTablePtrOutput) TableFullName } func init() { + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Input)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Args{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypePtrInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClientsInput)(nil)).Elem(), GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClientsArgs{}) @@ -31503,6 +32526,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArray{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskHealthInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskHealthArgs{}) @@ -31598,6 +32623,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTriggerInput)(nil)).Elem(), GetJobJobSettingsSettingsTriggerArgs{}) @@ -31614,6 +32641,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnFailureArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnFailureArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnStartInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnStartArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnStartArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnSuccessInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArgs{}) 
pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetMetastoreMetastoreInfoInput)(nil)).Elem(), GetMetastoreMetastoreInfoArgs{}) @@ -31690,6 +32719,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetTableTableInfoViewDependenciesDependencyFunctionPtrInput)(nil)).Elem(), GetTableTableInfoViewDependenciesDependencyFunctionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetTableTableInfoViewDependenciesDependencyTableInput)(nil)).Elem(), GetTableTableInfoViewDependenciesDependencyTableArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetTableTableInfoViewDependenciesDependencyTablePtrInput)(nil)).Elem(), GetTableTableInfoViewDependenciesDependencyTableArgs{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcsPtrOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3Output{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3PtrOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptVolumesPtrOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspaceOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterInitScriptWorkspacePtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypePtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClientsOutput{}) @@ -31884,6 +32921,8 @@ func init() { 
pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArrayOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskHealthOutput{}) @@ -31979,6 +33018,8 @@ func init() { pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArrayOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTriggerOutput{}) @@ -31995,6 +33036,8 @@ func init() { pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnFailureArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnStartOutput{}) 
pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnStartArrayOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnSuccessOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArrayOutput{}) pulumi.RegisterOutputType(GetMetastoreMetastoreInfoOutput{}) diff --git a/sdk/go/databricks/sqlPermissions.go b/sdk/go/databricks/sqlPermissions.go index 746b198e..48732f9d 100644 --- a/sdk/go/databricks/sqlPermissions.go +++ b/sdk/go/databricks/sqlPermissions.go @@ -98,8 +98,9 @@ type SqlPermissions struct { // If this access control for reading/writing any file. Defaults to `false`. AnyFile pulumi.BoolPtrOutput `pulumi:"anyFile"` // If this access control for the entire catalog. Defaults to `false`. - Catalog pulumi.BoolPtrOutput `pulumi:"catalog"` - ClusterId pulumi.StringOutput `pulumi:"clusterId"` + Catalog pulumi.BoolPtrOutput `pulumi:"catalog"` + // Id of an existing databricks_cluster, otherwise resource creation will fail. + ClusterId pulumi.StringOutput `pulumi:"clusterId"` // Name of the database. Has default value of `default`. Database pulumi.StringPtrOutput `pulumi:"database"` PrivilegeAssignments SqlPermissionsPrivilegeAssignmentArrayOutput `pulumi:"privilegeAssignments"` @@ -144,7 +145,8 @@ type sqlPermissionsState struct { // If this access control for reading/writing any file. Defaults to `false`. AnyFile *bool `pulumi:"anyFile"` // If this access control for the entire catalog. Defaults to `false`. - Catalog *bool `pulumi:"catalog"` + Catalog *bool `pulumi:"catalog"` + // Id of an existing databricks_cluster, otherwise resource creation will fail. ClusterId *string `pulumi:"clusterId"` // Name of the database. Has default value of `default`. 
Database *string `pulumi:"database"` @@ -161,7 +163,8 @@ type SqlPermissionsState struct { // If this access control for reading/writing any file. Defaults to `false`. AnyFile pulumi.BoolPtrInput // If this access control for the entire catalog. Defaults to `false`. - Catalog pulumi.BoolPtrInput + Catalog pulumi.BoolPtrInput + // Id of an existing databricks_cluster, otherwise resource creation will fail. ClusterId pulumi.StringPtrInput // Name of the database. Has default value of `default`. Database pulumi.StringPtrInput @@ -182,7 +185,8 @@ type sqlPermissionsArgs struct { // If this access control for reading/writing any file. Defaults to `false`. AnyFile *bool `pulumi:"anyFile"` // If this access control for the entire catalog. Defaults to `false`. - Catalog *bool `pulumi:"catalog"` + Catalog *bool `pulumi:"catalog"` + // Id of an existing databricks_cluster, otherwise resource creation will fail. ClusterId *string `pulumi:"clusterId"` // Name of the database. Has default value of `default`. Database *string `pulumi:"database"` @@ -200,7 +204,8 @@ type SqlPermissionsArgs struct { // If this access control for reading/writing any file. Defaults to `false`. AnyFile pulumi.BoolPtrInput // If this access control for the entire catalog. Defaults to `false`. - Catalog pulumi.BoolPtrInput + Catalog pulumi.BoolPtrInput + // Id of an existing databricks_cluster, otherwise resource creation will fail. ClusterId pulumi.StringPtrInput // Name of the database. Has default value of `default`. Database pulumi.StringPtrInput @@ -313,6 +318,7 @@ func (o SqlPermissionsOutput) Catalog() pulumi.BoolPtrOutput { return o.ApplyT(func(v *SqlPermissions) pulumi.BoolPtrOutput { return v.Catalog }).(pulumi.BoolPtrOutput) } +// Id of an existing databricks_cluster, otherwise resource creation will fail. 
func (o SqlPermissionsOutput) ClusterId() pulumi.StringOutput { return o.ApplyT(func(v *SqlPermissions) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput) } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Config.java b/sdk/java/src/main/java/com/pulumi/databricks/Config.java index c2424424..bf5d888e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Config.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Config.java @@ -87,6 +87,9 @@ public Optional rateLimit() { public Optional retryTimeoutSeconds() { return Codegen.integerProp("retryTimeoutSeconds").config(config).get(); } + public Optional serverlessComputeId() { + return Codegen.stringProp("serverlessComputeId").config(config).get(); + } public Optional skipVerify() { return Codegen.booleanProp("skipVerify").config(config).get(); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java index 4db3764c..69512f16 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java @@ -235,7 +235,7 @@ public final class DatabricksFunctions { * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). 
* @@ -323,7 +323,7 @@ public static Output getAwsAssumeRolePolicy(GetAws * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). * @@ -411,7 +411,7 @@ public static CompletableFuture getAwsAssumeRolePo * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). * @@ -499,7 +499,7 @@ public static Output getAwsAssumeRolePolicy(GetAws * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. 
* * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). * @@ -787,7 +787,7 @@ public static CompletableFuture getAwsBucketPolicyPlai * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. @@ -840,7 +840,7 @@ public static Output getAwsCrossAccountPolicy() * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. 
@@ -893,7 +893,7 @@ public static CompletableFuture getAwsCrossAccou * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. @@ -946,7 +946,7 @@ public static Output getAwsCrossAccountPolicy(Ge * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. @@ -999,7 +999,7 @@ public static CompletableFuture getAwsCrossAccou * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. 
* * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. @@ -1052,7 +1052,7 @@ public static Output getAwsCrossAccountPolicy(Ge * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. @@ -1064,7 +1064,7 @@ public static CompletableFuture getAwsCrossAccou /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog assume role policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
* * ## Example Usage * @@ -1116,7 +1116,7 @@ public static CompletableFuture getAwsCrossAccou * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1133,7 +1133,7 @@ public static Output getAwsUnityCatalo /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog assume role policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. * * ## Example Usage * @@ -1185,7 +1185,7 @@ public static Output getAwsUnityCatalo * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1202,7 +1202,7 @@ public static CompletableFuture getAws /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog assume role policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
* * ## Example Usage * @@ -1254,7 +1254,7 @@ public static CompletableFuture getAws * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1271,7 +1271,7 @@ public static Output getAwsUnityCatalo /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog assume role policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. * * ## Example Usage * @@ -1323,7 +1323,7 @@ public static Output getAwsUnityCatalo * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1340,7 +1340,7 @@ public static CompletableFuture getAws /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog policy for you. 
* * ## Example Usage * @@ -1392,7 +1392,7 @@ public static CompletableFuture getAws * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1409,7 +1409,7 @@ public static Output getAwsUnityCatalogPolicy(Ge /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog policy for you. * * ## Example Usage * @@ -1461,7 +1461,7 @@ public static Output getAwsUnityCatalogPolicy(Ge * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1478,7 +1478,7 @@ public static CompletableFuture getAwsUnityCatal /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog policy for you. 
* * ## Example Usage * @@ -1530,7 +1530,7 @@ public static CompletableFuture getAwsUnityCatal * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -1547,7 +1547,7 @@ public static Output getAwsUnityCatalogPolicy(Ge /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog policy for you. * * ## Example Usage * @@ -1599,7 +1599,7 @@ public static Output getAwsUnityCatalogPolicy(Ge * * var metastoreDataAccess = new Role("metastoreDataAccess", RoleArgs.builder() * .name(String.format("%s-uc-access", prefix)) - * .assumeRolePolicy(passroleForUc.json()) + * .assumeRolePolicy(thisAwsIamPolicyDocument.json()) * .managedPolicyArns(unityMetastore.arn()) * .build()); * @@ -7715,7 +7715,7 @@ public static CompletableFuture getMlflowModelPlain(GetMlf * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. 
- * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ public static Output getMwsCredentials() { @@ -7773,7 +7773,7 @@ public static Output getMwsCredentials() { * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ public static CompletableFuture getMwsCredentialsPlain() { @@ -7831,7 +7831,7 @@ public static CompletableFuture getMwsCredentialsPlain( * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. 
- * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ public static Output getMwsCredentials(GetMwsCredentialsArgs args) { @@ -7889,7 +7889,7 @@ public static Output getMwsCredentials(GetMwsCredential * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ public static CompletableFuture getMwsCredentialsPlain(GetMwsCredentialsPlainArgs args) { @@ -7947,7 +7947,7 @@ public static CompletableFuture getMwsCredentialsPlain( * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
* * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ public static Output getMwsCredentials(GetMwsCredentialsArgs args, InvokeOptions options) { @@ -8005,7 +8005,7 @@ public static Output getMwsCredentials(GetMwsCredential * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * */ public static CompletableFuture getMwsCredentialsPlain(GetMwsCredentialsPlainArgs args, InvokeOptions options) { @@ -8058,7 +8058,7 @@ public static CompletableFuture getMwsCredentialsPlain( * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. 
* * databricks.MetastoreAssignment * */ @@ -8112,7 +8112,7 @@ public static Output getMwsWorkspaces() { * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * * databricks.MetastoreAssignment * */ @@ -8166,7 +8166,7 @@ public static CompletableFuture getMwsWorkspacesPlain() * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * * databricks.MetastoreAssignment * */ @@ -8220,7 +8220,7 @@ public static Output getMwsWorkspaces(GetMwsWorkspacesAr * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * * databricks.MetastoreAssignment * */ @@ -8274,7 +8274,7 @@ public static CompletableFuture getMwsWorkspacesPlain(Ge * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * * databricks.MetastoreAssignment * */ @@ -8328,7 +8328,7 @@ public static Output getMwsWorkspaces(GetMwsWorkspacesAr * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. 
* * databricks.MetastoreAssignment * */ diff --git a/sdk/java/src/main/java/com/pulumi/databricks/IpAccessList.java b/sdk/java/src/main/java/com/pulumi/databricks/IpAccessList.java index fc201ef7..44ccbcb6 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/IpAccessList.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/IpAccessList.java @@ -75,7 +75,7 @@ * The following resources are often used in the same context: * * * End to end workspace management guide. - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). * * databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. 
diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsCredentials.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsCredentials.java index 7a4d6d7a..6846072e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsCredentials.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsCredentials.java @@ -33,7 +33,7 @@ * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java index 50d8c62d..54fc2d3b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java @@ -339,7 +339,7 @@ * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). 
* * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDelivery.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDelivery.java index 52bf111f..1ed0dfb9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDelivery.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDelivery.java @@ -120,7 +120,7 @@ * * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * @@ -270,14 +270,14 @@ public Output storageConfigurationId() { return this.storageConfigurationId; } /** - * By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * */ @Export(name="workspaceIdsFilters", refs={List.class,Integer.class}, tree="[0,1]") private Output> workspaceIdsFilters; /** - * @return By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. 
+ * @return By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * */ public Output>> workspaceIdsFilters() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDeliveryArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDeliveryArgs.java index bff81bad..e522121d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDeliveryArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsLogDeliveryArgs.java @@ -169,14 +169,14 @@ public Output storageConfigurationId() { } /** - * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * */ @Import(name="workspaceIdsFilters") private @Nullable Output> workspaceIdsFilters; /** - * @return By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @return By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. 
* */ public Optional>> workspaceIdsFilters() { @@ -428,7 +428,7 @@ public Builder storageConfigurationId(String storageConfigurationId) { } /** - * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * * @return builder * @@ -439,7 +439,7 @@ public Builder workspaceIdsFilters(@Nullable Output> workspaceIdsF } /** - * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. 
You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * * @return builder * @@ -449,7 +449,7 @@ public Builder workspaceIdsFilters(List workspaceIdsFilters) { } /** - * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsNetworks.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsNetworks.java index 1f563d58..7017133f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsNetworks.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsNetworks.java @@ -82,13 +82,13 @@ * The following resources are used in the same context: * * * Provisioning Databricks on AWS guide. - * * Provisioning Databricks on AWS with PrivateLink guide. - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * * Provisioning Databricks on AWS with Private Link guide. + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * * Provisioning Databricks on GCP guide. * * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * * databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration. 
* * databricks.MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html). - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsPrivateAccessSettings.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsPrivateAccessSettings.java index 9afab882..ac9b987d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsPrivateAccessSettings.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsPrivateAccessSettings.java @@ -166,12 +166,12 @@ * The following resources are used in the same context: * * * Provisioning Databricks on AWS guide. - * * Provisioning Databricks on AWS with PrivateLink guide. - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * * Provisioning Databricks on AWS with Private Link guide. + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * * databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration. * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
- * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsStorageConfigurations.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsStorageConfigurations.java index d24dd9d7..98bd93de 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsStorageConfigurations.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsStorageConfigurations.java @@ -30,12 +30,12 @@ * The following resources are used in the same context: * * * Provisioning Databricks on AWS guide. - * * Provisioning Databricks on AWS with PrivateLink guide. + * * Provisioning Databricks on AWS with Private Link guide. * * databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS. * * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
* * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/OnlineTable.java b/sdk/java/src/main/java/com/pulumi/databricks/OnlineTable.java index 242f0021..eb05e1df 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/OnlineTable.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/OnlineTable.java @@ -119,6 +119,12 @@ public Output> spec() { public Output> statuses() { return this.statuses; } + @Export(name="tableServingUrl", refs={String.class}, tree="[0]") + private Output tableServingUrl; + + public Output> tableServingUrl() { + return Codegen.optional(this.tableServingUrl); + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/OnlineTableArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/OnlineTableArgs.java index d0648166..44f29661 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/OnlineTableArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/OnlineTableArgs.java @@ -46,11 +46,19 @@ public Optional> spec() { return Optional.ofNullable(this.spec); } + @Import(name="tableServingUrl") + private @Nullable Output tableServingUrl; + + public Optional> tableServingUrl() { + return Optional.ofNullable(this.tableServingUrl); + } + private OnlineTableArgs() {} private OnlineTableArgs(OnlineTableArgs $) { this.name = $.name; this.spec = $.spec; + this.tableServingUrl = $.tableServingUrl; } public static Builder builder() { @@ -113,6 +121,15 @@ public Builder spec(OnlineTableSpecArgs spec) { return spec(Output.of(spec)); } + public Builder tableServingUrl(@Nullable Output tableServingUrl) { + $.tableServingUrl = tableServingUrl; + return this; + } + + public Builder tableServingUrl(String tableServingUrl) { + return tableServingUrl(Output.of(tableServingUrl)); + } + public OnlineTableArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Provider.java b/sdk/java/src/main/java/com/pulumi/databricks/Provider.java index db370e45..55aea1ae 100644 --- 
a/sdk/java/src/main/java/com/pulumi/databricks/Provider.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Provider.java @@ -137,6 +137,12 @@ public Output> password() { public Output> profile() { return Codegen.optional(this.profile); } + @Export(name="serverlessComputeId", refs={String.class}, tree="[0]") + private Output serverlessComputeId; + + public Output> serverlessComputeId() { + return Codegen.optional(this.serverlessComputeId); + } @Export(name="token", refs={String.class}, tree="[0]") private Output token; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/ProviderArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/ProviderArgs.java index 0e1e69cd..520ec0ab 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/ProviderArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/ProviderArgs.java @@ -192,6 +192,13 @@ public Optional> retryTimeoutSeconds() { return Optional.ofNullable(this.retryTimeoutSeconds); } + @Import(name="serverlessComputeId") + private @Nullable Output serverlessComputeId; + + public Optional> serverlessComputeId() { + return Optional.ofNullable(this.serverlessComputeId); + } + @Import(name="skipVerify", json=true) private @Nullable Output skipVerify; @@ -248,6 +255,7 @@ private ProviderArgs(ProviderArgs $) { this.profile = $.profile; this.rateLimit = $.rateLimit; this.retryTimeoutSeconds = $.retryTimeoutSeconds; + this.serverlessComputeId = $.serverlessComputeId; this.skipVerify = $.skipVerify; this.token = $.token; this.username = $.username; @@ -497,6 +505,15 @@ public Builder retryTimeoutSeconds(Integer retryTimeoutSeconds) { return retryTimeoutSeconds(Output.of(retryTimeoutSeconds)); } + public Builder serverlessComputeId(@Nullable Output serverlessComputeId) { + $.serverlessComputeId = serverlessComputeId; + return this; + } + + public Builder serverlessComputeId(String serverlessComputeId) { + return serverlessComputeId(Output.of(serverlessComputeId)); + } + public Builder skipVerify(@Nullable Output 
skipVerify) { $.skipVerify = skipVerify; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissions.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissions.java index 33d7412a..92a82211 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissions.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissions.java @@ -149,9 +149,17 @@ public Output> anyFile() { public Output> catalog() { return Codegen.optional(this.catalog); } + /** + * Id of an existing databricks_cluster, otherwise resource creation will fail. + * + */ @Export(name="clusterId", refs={String.class}, tree="[0]") private Output clusterId; + /** + * @return Id of an existing databricks_cluster, otherwise resource creation will fail. + * + */ public Output clusterId() { return this.clusterId; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissionsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissionsArgs.java index 02283b76..7c238f4f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissionsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlPermissionsArgs.java @@ -63,9 +63,17 @@ public Optional> catalog() { return Optional.ofNullable(this.catalog); } + /** + * Id of an existing databricks_cluster, otherwise resource creation will fail. + * + */ @Import(name="clusterId") private @Nullable Output clusterId; + /** + * @return Id of an existing databricks_cluster, otherwise resource creation will fail. + * + */ public Optional> clusterId() { return Optional.ofNullable(this.clusterId); } @@ -216,11 +224,23 @@ public Builder catalog(Boolean catalog) { return catalog(Output.of(catalog)); } + /** + * @param clusterId Id of an existing databricks_cluster, otherwise resource creation will fail. 
+ * + * @return builder + * + */ public Builder clusterId(@Nullable Output clusterId) { $.clusterId = clusterId; return this; } + /** + * @param clusterId Id of an existing databricks_cluster, otherwise resource creation will fail. + * + * @return builder + * + */ public Builder clusterId(String clusterId) { return clusterId(Output.of(clusterId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyArgs.java index 0ec5a3a3..fa0b2b02 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyArgs.java @@ -39,14 +39,14 @@ public Optional> databricksAccountId() { } /** - * Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket * */ @Import(name="databricksE2AccountId") private @Nullable Output databricksE2AccountId; /** - * @return Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * @return Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket * */ public Optional> databricksE2AccountId() { @@ -126,7 +126,7 @@ public Builder databricksAccountId(String databricksAccountId) { } /** - * @param databricksE2AccountId Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * @param databricksE2AccountId Your Databricks account ID. 
Used to generate restrictive IAM policies that will increase the security of your root bucket * * @return builder * @@ -137,7 +137,7 @@ public Builder databricksE2AccountId(@Nullable Output databricksE2Accoun } /** - * @param databricksE2AccountId Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * @param databricksE2AccountId Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyPlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyPlainArgs.java index 4a273667..6f63eef5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyPlainArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsBucketPolicyPlainArgs.java @@ -38,14 +38,14 @@ public Optional databricksAccountId() { } /** - * Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket * */ @Import(name="databricksE2AccountId") private @Nullable String databricksE2AccountId; /** - * @return Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * @return Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket * */ public Optional databricksE2AccountId() { @@ -111,7 +111,7 @@ public Builder databricksAccountId(@Nullable String databricksAccountId) { } /** - * @param databricksE2AccountId Your Databricks E2 account ID. 
Used to generate restrictive IAM policies that will increase the security of your root bucket + * @param databricksE2AccountId Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyArgs.java index 6bf164ad..04eb7af6 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyArgs.java @@ -47,14 +47,14 @@ public Output externalId() { } /** - * The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * The name of the AWS IAM role to be created for Unity Catalog. * */ @Import(name="roleName", required=true) private Output roleName; /** - * @return The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * @return The name of the AWS IAM role to be created for Unity Catalog. * */ public Output roleName() { @@ -146,7 +146,7 @@ public Builder externalId(String externalId) { } /** - * @param roleName The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * @param roleName The name of the AWS IAM role to be created for Unity Catalog. 
* * @return builder * @@ -157,7 +157,7 @@ public Builder roleName(Output roleName) { } /** - * @param roleName The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * @param roleName The name of the AWS IAM role to be created for Unity Catalog. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyPlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyPlainArgs.java index be66a21f..f87b249f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyPlainArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetAwsUnityCatalogAssumeRolePolicyPlainArgs.java @@ -46,14 +46,14 @@ public String externalId() { } /** - * The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * The name of the AWS IAM role to be created for Unity Catalog. * */ @Import(name="roleName", required=true) private String roleName; /** - * @return The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * @return The name of the AWS IAM role to be created for Unity Catalog. * */ public String roleName() { @@ -125,7 +125,7 @@ public Builder externalId(String externalId) { } /** - * @param roleName The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). 
+ * @param roleName The name of the AWS IAM role to be created for Unity Catalog. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfo.java index e3ef5e04..3fbbdc11 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfo.java @@ -129,6 +129,13 @@ public Optional encryp return Optional.ofNullable(this.encryptionDetails); } + @Import(name="isolationMode") + private @Nullable String isolationMode; + + public Optional isolationMode() { + return Optional.ofNullable(this.isolationMode); + } + /** * Unique identifier of the parent Metastore. * @@ -245,6 +252,7 @@ private GetExternalLocationExternalLocationInfo(GetExternalLocationExternalLocat this.credentialId = $.credentialId; this.credentialName = $.credentialName; this.encryptionDetails = $.encryptionDetails; + this.isolationMode = $.isolationMode; this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; @@ -354,6 +362,11 @@ public Builder encryptionDetails(@Nullable GetExternalLocationExternalLocationIn return this; } + public Builder isolationMode(@Nullable String isolationMode) { + $.isolationMode = isolationMode; + return this; + } + /** * @param metastoreId Unique identifier of the parent Metastore. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfoArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfoArgs.java index 30ef3bcc..ece697dc 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfoArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetExternalLocationExternalLocationInfoArgs.java @@ -130,6 +130,13 @@ public Optional isolationMode; + + public Optional> isolationMode() { + return Optional.ofNullable(this.isolationMode); + } + /** * Unique identifier of the parent Metastore. * @@ -246,6 +253,7 @@ private GetExternalLocationExternalLocationInfoArgs(GetExternalLocationExternalL this.credentialId = $.credentialId; this.credentialName = $.credentialName; this.encryptionDetails = $.encryptionDetails; + this.isolationMode = $.isolationMode; this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; @@ -429,6 +437,15 @@ public Builder encryptionDetails(GetExternalLocationExternalLocationInfoEncrypti return encryptionDetails(Output.of(encryptionDetails)); } + public Builder isolationMode(@Nullable Output isolationMode) { + $.isolationMode = isolationMode; + return this; + } + + public Builder isolationMode(String isolationMode) { + return isolationMode(Output.of(isolationMode)); + } + /** * @param metastoreId Unique identifier of the parent Metastore. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotifications.java index 7bec3612..750fdcdc 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotifications.java @@ -44,6 +44,13 @@ public Optional> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable List onStreamingBacklogExceededs; + + public Optional> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable List onSuccesses; @@ -58,6 +65,7 @@ private GetJobJobSettingsSettingsEmailNotifications(GetJobJobSettingsSettingsEma this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -111,6 +119,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable List onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.java index 78e332ac..2a20da68 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsEmailNotificationsArgs.java @@ -45,6 +45,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable Output> onSuccesses; @@ -59,6 +66,7 @@ private GetJobJobSettingsSettingsEmailNotificationsArgs(GetJobJobSettingsSetting this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -128,6 +136,19 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable Output> onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotifications.java index 5070a862..784ae4bf 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotifications.java @@ -44,6 +44,13 @@ public Optional> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable List onStreamingBacklogExceededs; + + public Optional> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable List onSuccesses; @@ -58,6 +65,7 @@ private GetJobJobSettingsSettingsTaskEmailNotifications(GetJobJobSettingsSetting this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -111,6 +119,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable List onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.java index 024aa152..79892270 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskEmailNotificationsArgs.java @@ -45,6 +45,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable Output> onSuccesses; @@ -59,6 +66,7 @@ private GetJobJobSettingsSettingsTaskEmailNotificationsArgs(GetJobJobSettingsSet this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -128,6 +136,19 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable Output> onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java index 4da9aedf..170b7ef4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java @@ -44,6 +44,13 @@ public Optional> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable List onStreamingBacklogExceededs; + + public Optional> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable List onSuccesses; @@ -58,6 +65,7 @@ private GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications(GetJobJob this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -111,6 +119,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable List onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.java index 80908416..bcb324c4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs.java @@ -45,6 +45,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable Output> onSuccesses; @@ -59,6 +66,7 @@ private GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsArgs(GetJo this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -128,6 +136,19 @@ public Builder onStarts(String... 
onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(String... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable Output> onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java index 3347d082..c8f1fb78 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -39,6 +40,13 @@ public Optional onStreamingBacklogExceededs; + + public Optional> onStreamingBacklogExceededs() { + return 
Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable List onSuccesses; @@ -52,6 +60,7 @@ private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications(GetJobJ this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -100,6 +109,15 @@ public Builder onStarts(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotif return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable List onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.java index 0119503f..1141a86c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs.java @@ -8,6 +8,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArgs; import 
com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArgs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs; import java.util.List; import java.util.Objects; @@ -40,6 +41,13 @@ public Optional> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable Output> onSuccesses; @@ -53,6 +61,7 @@ private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs(Get this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -113,6 +122,19 @@ public Builder onStarts(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotif return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable Output> onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..9640b590 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded extends com.pulumi.resources.InvokeArgs { + + public static final GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded Empty = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(); + + /** + * the id of databricks.Job if the resource was matched by name. + * + */ + @Import(name="id", required=true) + private String id; + + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + public String id() { + return this.id; + } + + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded() {} + + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded $; + + public Builder() { + $ = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(); + } + + public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + $ = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(Objects.requireNonNull(defaults)); + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. 
+ * + * @return builder + * + */ + public Builder id(String id) { + $.id = id; + return this; + } + + public GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java new file mode 100644 index 00000000..9451d4f4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + + /** + * the id of databricks.Job if the resource was matched by name. + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + public Output id() { + return this.id; + } + + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs() {} + + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs $; + + public Builder() { + $ = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } + + public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + $ = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. 
+ * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java index 1ce9c451..4313ac1b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailure; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -39,6 +40,13 @@ public Optional> return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable List onStreamingBacklogExceededs; + + public Optional> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable List onSuccesses; @@ -52,6 +60,7 @@ private GetJobJobSettingsSettingsTaskWebhookNotifications(GetJobJobSettingsSetti this.onDurationWarningThresholdExceededs = 
$.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -100,6 +109,15 @@ public Builder onStarts(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable List onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.java index 3eeeccd3..d6a2462a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsArgs.java @@ -8,6 +8,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArgs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArgs; import java.util.List; import java.util.Objects; @@ -40,6 +41,13 @@ public Optional> 
onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable Output> onSuccesses; @@ -53,6 +61,7 @@ private GetJobJobSettingsSettingsTaskWebhookNotificationsArgs(GetJobJobSettingsS this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -113,6 +122,19 @@ public Builder onStarts(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable Output> onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..bbfc2c30 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded extends com.pulumi.resources.InvokeArgs { + + public static final GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded Empty = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded(); + + /** + * the id of databricks.Job if the resource was matched by name. + * + */ + @Import(name="id", required=true) + private String id; + + /** + * @return the id of databricks.Job if the resource was matched by name. + * + */ + public String id() { + return this.id; + } + + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded() {} + + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded $; + + public Builder() { + $ = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded(); + } + + public Builder(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + $ = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded(Objects.requireNonNull(defaults)); + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. 
+ * + * @return builder + * + */ + public Builder id(String id) { + $.id = id; + return this; + } + + public GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java new file mode 100644 index 00000000..e970d00f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + + /** + * the id of databricks.Job if the resource was matched by name. + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + public Output id() { + return this.id; + } + + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs() {} + + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs $; + + public Builder() { + $ = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } + + public Builder(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + $ = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. 
+ * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotifications.java index 86ae338e..52afb2ca 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnFailure; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnStart; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -39,6 +40,13 @@ public Optional> onSt return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable List onStreamingBacklogExceededs; + + public Optional> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable List onSuccesses; @@ -52,6 +60,7 @@ private GetJobJobSettingsSettingsWebhookNotifications(GetJobJobSettingsSettingsW this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = 
$.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -100,6 +109,15 @@ public Builder onStarts(GetJobJobSettingsSettingsWebhookNotificationsOnStart... return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable List onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.java index e636c640..b0231630 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsArgs.java @@ -8,6 +8,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceededArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnFailureArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArgs; import java.util.List; import java.util.Objects; @@ -40,6 +41,13 @@ public Optional> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return 
Optional.ofNullable(this.onStreamingBacklogExceededs); + } + @Import(name="onSuccesses") private @Nullable Output> onSuccesses; @@ -53,6 +61,7 @@ private GetJobJobSettingsSettingsWebhookNotificationsArgs(GetJobJobSettingsSetti this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -113,6 +122,19 @@ public Builder onStarts(GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + public Builder onSuccesses(@Nullable Output> onSuccesses) { $.onSuccesses = onSuccesses; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..55e48225 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded extends com.pulumi.resources.InvokeArgs { + + public static final GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded Empty = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded(); + + /** + * the id of databricks.Job if the resource was matched by name. + * + */ + @Import(name="id", required=true) + private String id; + + /** + * @return the id of databricks.Job if the resource was matched by name. + * + */ + public String id() { + return this.id; + } + + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded() {} + + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded $; + + public Builder() { + $ = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded(); + } + + public Builder(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded defaults) { + $ = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded(Objects.requireNonNull(defaults)); + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. 
+ * + * @return builder + * + */ + public Builder id(String id) { + $.id = id; + return this; + } + + public GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.java new file mode 100644 index 00000000..97de0d5a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs Empty = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs(); + + /** + * the id of databricks.Job if the resource was matched by name. + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + public Output id() { + return this.id; + } + + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs() {} + + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs $; + + public Builder() { + $ = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs(); + } + + public Builder(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + $ = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id the id of databricks.Job if the resource was matched by name. 
+ * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfo.java index ce93a9b7..f67a1b35 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfo.java @@ -140,6 +140,13 @@ public Optional id() { return Optional.ofNullable(this.id); } + @Import(name="isolationMode") + private @Nullable String isolationMode; + + public Optional isolationMode() { + return Optional.ofNullable(this.isolationMode); + } + /** * Unique identifier of the parent Metastore. * @@ -249,6 +256,7 @@ private GetStorageCredentialStorageCredentialInfo(GetStorageCredentialStorageCre this.createdBy = $.createdBy; this.databricksGcpServiceAccount = $.databricksGcpServiceAccount; this.id = $.id; + this.isolationMode = $.isolationMode; this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; @@ -363,6 +371,11 @@ public Builder id(@Nullable String id) { return this; } + public Builder isolationMode(@Nullable String isolationMode) { + $.isolationMode = isolationMode; + return this; + } + /** * @param metastoreId Unique identifier of the parent Metastore. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfoArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfoArgs.java index 5cc8be37..f6736c89 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfoArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetStorageCredentialStorageCredentialInfoArgs.java @@ -141,6 +141,13 @@ public Optional> id() { return Optional.ofNullable(this.id); } + @Import(name="isolationMode") + private @Nullable Output isolationMode; + + public Optional> isolationMode() { + return Optional.ofNullable(this.isolationMode); + } + /** * Unique identifier of the parent Metastore. * @@ -250,6 +257,7 @@ private GetStorageCredentialStorageCredentialInfoArgs(GetStorageCredentialStorag this.createdBy = $.createdBy; this.databricksGcpServiceAccount = $.databricksGcpServiceAccount; this.id = $.id; + this.isolationMode = $.isolationMode; this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; @@ -442,6 +450,15 @@ public Builder id(String id) { return id(Output.of(id)); } + public Builder isolationMode(@Nullable Output isolationMode) { + $.isolationMode = isolationMode; + return this; + } + + public Builder isolationMode(String isolationMode) { + return isolationMode(Output.of(isolationMode)); + } + /** * @param metastoreId Unique identifier of the parent Metastore. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobEmailNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobEmailNotificationsArgs.java index 7923bbed..691cff62 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobEmailNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobEmailNotificationsArgs.java @@ -81,6 +81,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + /** * (List) list of emails to notify when the run completes successfully. * @@ -103,6 +110,7 @@ private JobEmailNotificationsArgs(JobEmailNotificationsArgs $) { this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -244,6 +252,19 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(String... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + /** * @param onSuccesses (List) list of emails to notify when the run completes successfully. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskEmailNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskEmailNotificationsArgs.java index 213b4cc6..78fa7250 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskEmailNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskEmailNotificationsArgs.java @@ -81,6 +81,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + /** * (List) list of emails to notify when the run completes successfully. * @@ -103,6 +110,7 @@ private JobTaskEmailNotificationsArgs(JobTaskEmailNotificationsArgs $) { this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -244,6 +252,19 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(String... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + /** * @param onSuccesses (List) list of emails to notify when the run completes successfully. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.java index cbdf036e..b822d20c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskEmailNotificationsArgs.java @@ -81,6 +81,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + /** * (List) list of emails to notify when the run completes successfully. * @@ -103,6 +110,7 @@ private JobTaskForEachTaskTaskEmailNotificationsArgs(JobTaskForEachTaskTaskEmail this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -244,6 +252,19 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(String... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + /** * @param onSuccesses (List) list of emails to notify when the run completes successfully. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.java index 9f9d0f5e..5d37ae85 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsArgs.java @@ -8,6 +8,7 @@ import com.pulumi.databricks.inputs.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs; import com.pulumi.databricks.inputs.JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs; import com.pulumi.databricks.inputs.JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs; +import com.pulumi.databricks.inputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs; import com.pulumi.databricks.inputs.JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs; import java.util.List; import java.util.Objects; @@ -72,6 +73,13 @@ public Optional> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * @@ -93,6 +101,7 @@ private JobTaskForEachTaskTaskWebhookNotificationsArgs(JobTaskForEachTaskTaskWeb this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -219,6 +228,19 @@ public Builder onStarts(JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs... 
return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + /** * @param onSuccesses (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java new file mode 100644 index 00000000..0f1f4d0b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty = new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + + /** + * ID of the job + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return ID of the job + * + */ + public Output id() { + return this.id; + } + + private JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs() {} + + private JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs $; + + public Builder() { + $ = new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } + + public Builder(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + $ = new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id ID of the job + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id ID of the job + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public 
JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsArgs.java index 7c373e53..9a3ab77e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsArgs.java @@ -8,6 +8,7 @@ import com.pulumi.databricks.inputs.JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs; import com.pulumi.databricks.inputs.JobTaskWebhookNotificationsOnFailureArgs; import com.pulumi.databricks.inputs.JobTaskWebhookNotificationsOnStartArgs; +import com.pulumi.databricks.inputs.JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs; import com.pulumi.databricks.inputs.JobTaskWebhookNotificationsOnSuccessArgs; import java.util.List; import java.util.Objects; @@ -72,6 +73,13 @@ public Optional>> onStarts() return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
* @@ -93,6 +101,7 @@ private JobTaskWebhookNotificationsArgs(JobTaskWebhookNotificationsArgs $) { this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -219,6 +228,19 @@ public Builder onStarts(JobTaskWebhookNotificationsOnStartArgs... onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + /** * @param onSuccesses (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java new file mode 100644 index 00000000..33e8801f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs Empty = new JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + + /** + * ID of the job + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return ID of the job + * + */ + public Output id() { + return this.id; + } + + private JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs() {} + + private JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs(JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs $; + + public Builder() { + $ = new JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs(); + } + + public Builder(JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + $ = new JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id ID of the job + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id ID of the job + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs build() { + if ($.id == null) { + throw new 
MissingRequiredPropertyException("JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs", "id"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerArgs.java index 5aaed5e2..b654b124 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerArgs.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.JobTriggerFileArrivalArgs; +import com.pulumi.databricks.inputs.JobTriggerPeriodicArgs; import com.pulumi.databricks.inputs.JobTriggerTableArgs; import com.pulumi.databricks.inputs.JobTriggerTableUpdateArgs; import java.lang.String; @@ -48,6 +49,13 @@ public Optional> pauseStatus() { return Optional.ofNullable(this.pauseStatus); } + @Import(name="periodic") + private @Nullable Output periodic; + + public Optional> periodic() { + return Optional.ofNullable(this.periodic); + } + @Import(name="table") private @Nullable Output table; @@ -67,6 +75,7 @@ private JobTriggerArgs() {} private JobTriggerArgs(JobTriggerArgs $) { this.fileArrival = $.fileArrival; this.pauseStatus = $.pauseStatus; + this.periodic = $.periodic; this.table = $.table; this.tableUpdate = $.tableUpdate; } @@ -131,6 +140,15 @@ public Builder pauseStatus(String pauseStatus) { return pauseStatus(Output.of(pauseStatus)); } + public Builder periodic(@Nullable Output periodic) { + $.periodic = periodic; + return this; + } + + public Builder periodic(JobTriggerPeriodicArgs periodic) { + return periodic(Output.of(periodic)); + } + public Builder table(@Nullable Output table) { $.table = table; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerPeriodicArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerPeriodicArgs.java new file mode 100644 index 
00000000..ab949963 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTriggerPeriodicArgs.java @@ -0,0 +1,86 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; + + +public final class JobTriggerPeriodicArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobTriggerPeriodicArgs Empty = new JobTriggerPeriodicArgs(); + + @Import(name="interval", required=true) + private Output interval; + + public Output interval() { + return this.interval; + } + + @Import(name="unit", required=true) + private Output unit; + + public Output unit() { + return this.unit; + } + + private JobTriggerPeriodicArgs() {} + + private JobTriggerPeriodicArgs(JobTriggerPeriodicArgs $) { + this.interval = $.interval; + this.unit = $.unit; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobTriggerPeriodicArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobTriggerPeriodicArgs $; + + public Builder() { + $ = new JobTriggerPeriodicArgs(); + } + + public Builder(JobTriggerPeriodicArgs defaults) { + $ = new JobTriggerPeriodicArgs(Objects.requireNonNull(defaults)); + } + + public Builder interval(Output interval) { + $.interval = interval; + return this; + } + + public Builder interval(Integer interval) { + return interval(Output.of(interval)); + } + + public Builder unit(Output unit) { + $.unit = unit; + return this; + } + + public Builder unit(String unit) { + return unit(Output.of(unit)); + } + + public JobTriggerPeriodicArgs build() { + if ($.interval == null) { + throw new 
MissingRequiredPropertyException("JobTriggerPeriodicArgs", "interval"); + } + if ($.unit == null) { + throw new MissingRequiredPropertyException("JobTriggerPeriodicArgs", "unit"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsArgs.java index 66de523d..39a35fb3 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsArgs.java @@ -8,6 +8,7 @@ import com.pulumi.databricks.inputs.JobWebhookNotificationsOnDurationWarningThresholdExceededArgs; import com.pulumi.databricks.inputs.JobWebhookNotificationsOnFailureArgs; import com.pulumi.databricks.inputs.JobWebhookNotificationsOnStartArgs; +import com.pulumi.databricks.inputs.JobWebhookNotificationsOnStreamingBacklogExceededArgs; import com.pulumi.databricks.inputs.JobWebhookNotificationsOnSuccessArgs; import java.util.List; import java.util.Objects; @@ -72,6 +73,13 @@ public Optional>> onStarts() { return Optional.ofNullable(this.onStarts); } + @Import(name="onStreamingBacklogExceededs") + private @Nullable Output> onStreamingBacklogExceededs; + + public Optional>> onStreamingBacklogExceededs() { + return Optional.ofNullable(this.onStreamingBacklogExceededs); + } + /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * @@ -93,6 +101,7 @@ private JobWebhookNotificationsArgs(JobWebhookNotificationsArgs $) { this.onDurationWarningThresholdExceededs = $.onDurationWarningThresholdExceededs; this.onFailures = $.onFailures; this.onStarts = $.onStarts; + this.onStreamingBacklogExceededs = $.onStreamingBacklogExceededs; this.onSuccesses = $.onSuccesses; } @@ -219,6 +228,19 @@ public Builder onStarts(JobWebhookNotificationsOnStartArgs... 
onStarts) { return onStarts(List.of(onStarts)); } + public Builder onStreamingBacklogExceededs(@Nullable Output> onStreamingBacklogExceededs) { + $.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + + public Builder onStreamingBacklogExceededs(List onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(Output.of(onStreamingBacklogExceededs)); + } + + public Builder onStreamingBacklogExceededs(JobWebhookNotificationsOnStreamingBacklogExceededArgs... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + /** * @param onSuccesses (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.java new file mode 100644 index 00000000..7487dad4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobWebhookNotificationsOnStreamingBacklogExceededArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class JobWebhookNotificationsOnStreamingBacklogExceededArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobWebhookNotificationsOnStreamingBacklogExceededArgs Empty = new JobWebhookNotificationsOnStreamingBacklogExceededArgs(); + + /** + * ID of the job + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return ID of the job + * + */ + public Output id() { + return this.id; + } + + private JobWebhookNotificationsOnStreamingBacklogExceededArgs() {} + + private JobWebhookNotificationsOnStreamingBacklogExceededArgs(JobWebhookNotificationsOnStreamingBacklogExceededArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobWebhookNotificationsOnStreamingBacklogExceededArgs $; + + public Builder() { + $ = new JobWebhookNotificationsOnStreamingBacklogExceededArgs(); + } + + public Builder(JobWebhookNotificationsOnStreamingBacklogExceededArgs defaults) { + $ = new JobWebhookNotificationsOnStreamingBacklogExceededArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id ID of the job + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id ID of the job + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public JobWebhookNotificationsOnStreamingBacklogExceededArgs build() { + if ($.id == null) { + throw new MissingRequiredPropertyException("JobWebhookNotificationsOnStreamingBacklogExceededArgs", "id"); + } + return $; + } + } + +} 
diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsLogDeliveryState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsLogDeliveryState.java index 8c5fd0ae..c5f17c95 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsLogDeliveryState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsLogDeliveryState.java @@ -168,14 +168,14 @@ public Optional> storageConfigurationId() { } /** - * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * */ @Import(name="workspaceIdsFilters") private @Nullable Output> workspaceIdsFilters; /** - * @return By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @return By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * */ public Optional>> workspaceIdsFilters() { @@ -427,7 +427,7 @@ public Builder storageConfigurationId(String storageConfigurationId) { } /** - * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. 
If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * * @return builder * @@ -438,7 +438,7 @@ public Builder workspaceIdsFilters(@Nullable Output> workspaceIdsF } /** - * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. 
You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. * * @return builder * @@ -448,7 +448,7 @@ public Builder workspaceIdsFilters(List workspaceIdsFilters) { } /** - * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * @param workspaceIdsFilters By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/OnlineTableState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/OnlineTableState.java index a3ab2059..cf08c511 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/OnlineTableState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/OnlineTableState.java @@ -63,12 +63,20 @@ public Optional>> statuses() { return Optional.ofNullable(this.statuses); } + @Import(name="tableServingUrl") + private @Nullable Output tableServingUrl; + + public Optional> tableServingUrl() { + return Optional.ofNullable(this.tableServingUrl); + } + private OnlineTableState() {} private OnlineTableState(OnlineTableState $) { this.name = $.name; this.spec = $.spec; this.statuses = $.statuses; + this.tableServingUrl = $.tableServingUrl; } public static Builder builder() { @@ -162,6 +170,15 @@ public Builder statuses(OnlineTableStatusArgs... statuses) { return statuses(List.of(statuses)); } + public Builder tableServingUrl(@Nullable Output tableServingUrl) { + $.tableServingUrl = tableServingUrl; + return this; + } + + public Builder tableServingUrl(String tableServingUrl) { + return tableServingUrl(Output.of(tableServingUrl)); + } + public OnlineTableState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlPermissionsState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlPermissionsState.java index 4f0fa7ee..174737cc 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlPermissionsState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlPermissionsState.java @@ -63,9 +63,17 @@ public Optional> catalog() { return Optional.ofNullable(this.catalog); } + /** + * Id of an existing databricks_cluster, otherwise resource creation will fail. 
+ * + */ @Import(name="clusterId") private @Nullable Output clusterId; + /** + * @return Id of an existing databricks_cluster, otherwise resource creation will fail. + * + */ public Optional> clusterId() { return Optional.ofNullable(this.clusterId); } @@ -216,11 +224,23 @@ public Builder catalog(Boolean catalog) { return catalog(Output.of(catalog)); } + /** + * @param clusterId Id of an existing databricks_cluster, otherwise resource creation will fail. + * + * @return builder + * + */ public Builder clusterId(@Nullable Output clusterId) { $.clusterId = clusterId; return this; } + /** + * @param clusterId Id of an existing databricks_cluster, otherwise resource creation will fail. + * + * @return builder + * + */ public Builder clusterId(String clusterId) { return clusterId(Output.of(clusterId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetExternalLocationExternalLocationInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetExternalLocationExternalLocationInfo.java index f53214bd..eab8b93a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetExternalLocationExternalLocationInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetExternalLocationExternalLocationInfo.java @@ -50,6 +50,7 @@ public final class GetExternalLocationExternalLocationInfo { * */ private @Nullable GetExternalLocationExternalLocationInfoEncryptionDetails encryptionDetails; + private @Nullable String isolationMode; /** * @return Unique identifier of the parent Metastore. * @@ -139,6 +140,9 @@ public Optional credentialName() { public Optional encryptionDetails() { return Optional.ofNullable(this.encryptionDetails); } + public Optional isolationMode() { + return Optional.ofNullable(this.isolationMode); + } /** * @return Unique identifier of the parent Metastore. 
* @@ -206,6 +210,7 @@ public static final class Builder { private @Nullable String credentialId; private @Nullable String credentialName; private @Nullable GetExternalLocationExternalLocationInfoEncryptionDetails encryptionDetails; + private @Nullable String isolationMode; private @Nullable String metastoreId; private @Nullable String name; private @Nullable String owner; @@ -224,6 +229,7 @@ public Builder(GetExternalLocationExternalLocationInfo defaults) { this.credentialId = defaults.credentialId; this.credentialName = defaults.credentialName; this.encryptionDetails = defaults.encryptionDetails; + this.isolationMode = defaults.isolationMode; this.metastoreId = defaults.metastoreId; this.name = defaults.name; this.owner = defaults.owner; @@ -282,6 +288,12 @@ public Builder encryptionDetails(@Nullable GetExternalLocationExternalLocationIn return this; } @CustomType.Setter + public Builder isolationMode(@Nullable String isolationMode) { + + this.isolationMode = isolationMode; + return this; + } + @CustomType.Setter public Builder metastoreId(@Nullable String metastoreId) { this.metastoreId = metastoreId; @@ -333,6 +345,7 @@ public GetExternalLocationExternalLocationInfo build() { _resultValue.credentialId = credentialId; _resultValue.credentialName = credentialName; _resultValue.encryptionDetails = encryptionDetails; + _resultValue.isolationMode = isolationMode; _resultValue.metastoreId = metastoreId; _resultValue.name = name; _resultValue.owner = owner; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsEmailNotifications.java index dbceabc8..0cf598a8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsEmailNotifications.java @@ -17,6 +17,7 @@ public final class 
GetJobJobSettingsSettingsEmailNotifications { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; private GetJobJobSettingsSettingsEmailNotifications() {} @@ -32,6 +33,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } public List onSuccesses() { return this.onSuccesses == null ? List.of() : this.onSuccesses; } @@ -49,6 +53,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(GetJobJobSettingsSettingsEmailNotifications defaults) { @@ -57,6 +62,7 @@ public Builder(GetJobJobSettingsSettingsEmailNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -94,6 +100,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -108,6 +123,7 @@ public GetJobJobSettingsSettingsEmailNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskEmailNotifications.java index ee35bf5c..e606d676 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskEmailNotifications.java @@ -17,6 +17,7 @@ public final class GetJobJobSettingsSettingsTaskEmailNotifications { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; private GetJobJobSettingsSettingsTaskEmailNotifications() {} @@ -32,6 +33,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } public List onSuccesses() { return this.onSuccesses == null ? 
List.of() : this.onSuccesses; } @@ -49,6 +53,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(GetJobJobSettingsSettingsTaskEmailNotifications defaults) { @@ -57,6 +62,7 @@ public Builder(GetJobJobSettingsSettingsTaskEmailNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -94,6 +100,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -108,6 +123,7 @@ public GetJobJobSettingsSettingsTaskEmailNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java index 811f04e8..c567307f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications.java @@ -17,6 +17,7 @@ public final class GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotification private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; private GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications() {} @@ -32,6 +33,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } public List onSuccesses() { return this.onSuccesses == null ? 
List.of() : this.onSuccesses; } @@ -49,6 +53,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications defaults) { @@ -57,6 +62,7 @@ public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications de this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -94,6 +100,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -108,6 +123,7 @@ public GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java index 0f466c86..fead73b1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart; +import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -17,6 +18,7 @@ public final class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificati private @Nullable List onDurationWarningThresholdExceededs; 
private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications() {} @@ -29,6 +31,9 @@ public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } public List onSuccesses() { return this.onSuccesses == null ? List.of() : this.onSuccesses; } @@ -45,6 +50,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications defaults) { @@ -52,6 +58,7 @@ public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -83,6 +90,15 @@ public Builder onStarts(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotif return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -96,6 +112,7 @@ public GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications build() _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..bbcc0617 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * @return the id of databricks.Job if the resource was matched by name. + * + */ + private String id; + + private GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded() {} + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + this.id = id; + return this; + } + public GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded build() { + final var _resultValue = new GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(); + _resultValue.id = id; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java index ef8a1829..838ec51a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailure; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart; +import 
com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -17,6 +18,7 @@ public final class GetJobJobSettingsSettingsTaskWebhookNotifications { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; private GetJobJobSettingsSettingsTaskWebhookNotifications() {} @@ -29,6 +31,9 @@ public List onFailur public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } public List onSuccesses() { return this.onSuccesses == null ? List.of() : this.onSuccesses; } @@ -45,6 +50,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(GetJobJobSettingsSettingsTaskWebhookNotifications defaults) { @@ -52,6 +58,7 @@ public Builder(GetJobJobSettingsSettingsTaskWebhookNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -83,6 +90,15 @@ public Builder onStarts(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + 
this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -96,6 +112,7 @@ public GetJobJobSettingsSettingsTaskWebhookNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..efc1d5a1 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + private String id; + + private GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded() {} + /** + * @return the id of databricks.Job if the resource was matched by name. + * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + this.id = id; + return this; + } + public GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded build() { + final var _resultValue = new GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded(); + _resultValue.id = id; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotifications.java index e4b01028..edbe03a6 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsWebhookNotificationsOnFailure; import 
com.pulumi.databricks.outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStart; +import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -17,6 +18,7 @@ public final class GetJobJobSettingsSettingsWebhookNotifications { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; private GetJobJobSettingsSettingsWebhookNotifications() {} @@ -29,6 +31,9 @@ public List onFailures() public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } public List onSuccesses() { return this.onSuccesses == null ? List.of() : this.onSuccesses; } @@ -45,6 +50,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(GetJobJobSettingsSettingsWebhookNotifications defaults) { @@ -52,6 +58,7 @@ public Builder(GetJobJobSettingsSettingsWebhookNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -83,6 +90,15 @@ public Builder onStarts(GetJobJobSettingsSettingsWebhookNotificationsOnStart... 
return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -96,6 +112,7 @@ public GetJobJobSettingsSettingsWebhookNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..13e27726 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded { + /** + * @return the id of databricks.Job if the resource was matched by name. 
+ * + */ + private String id; + + private GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded() {} + /** + * @return the id of databricks.Job if the resource was matched by name. + * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + this.id = id; + return this; + } + public GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded build() { + final var _resultValue = new GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded(); + _resultValue.id = id; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetStorageCredentialStorageCredentialInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetStorageCredentialStorageCredentialInfo.java index 69190bfe..82d1b730 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetStorageCredentialStorageCredentialInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetStorageCredentialStorageCredentialInfo.java @@ -55,6 +55,7 @@ public final class GetStorageCredentialStorageCredentialInfo { * */ private @Nullable String id; + private @Nullable String isolationMode; /** * @return Unique identifier of the parent Metastore. 
* @@ -143,6 +144,9 @@ public Optional id() { return Optional.ofNullable(this.id); } + public Optional isolationMode() { + return Optional.ofNullable(this.isolationMode); + } /** * @return Unique identifier of the parent Metastore. * @@ -207,6 +211,7 @@ public static final class Builder { private @Nullable String createdBy; private @Nullable GetStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccount databricksGcpServiceAccount; private @Nullable String id; + private @Nullable String isolationMode; private @Nullable String metastoreId; private @Nullable String name; private @Nullable String owner; @@ -226,6 +231,7 @@ public Builder(GetStorageCredentialStorageCredentialInfo defaults) { this.createdBy = defaults.createdBy; this.databricksGcpServiceAccount = defaults.databricksGcpServiceAccount; this.id = defaults.id; + this.isolationMode = defaults.isolationMode; this.metastoreId = defaults.metastoreId; this.name = defaults.name; this.owner = defaults.owner; @@ -290,6 +296,12 @@ public Builder id(@Nullable String id) { return this; } @CustomType.Setter + public Builder isolationMode(@Nullable String isolationMode) { + + this.isolationMode = isolationMode; + return this; + } + @CustomType.Setter public Builder metastoreId(@Nullable String metastoreId) { this.metastoreId = metastoreId; @@ -342,6 +354,7 @@ public GetStorageCredentialStorageCredentialInfo build() { _resultValue.createdBy = createdBy; _resultValue.databricksGcpServiceAccount = databricksGcpServiceAccount; _resultValue.id = id; + _resultValue.isolationMode = isolationMode; _resultValue.metastoreId = metastoreId; _resultValue.name = name; _resultValue.owner = owner; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobEmailNotifications.java index b63819f7..d379d2d1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobEmailNotifications.java +++ 
b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobEmailNotifications.java @@ -35,6 +35,7 @@ public final class JobEmailNotifications { * */ private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; /** * @return (List) list of emails to notify when the run completes successfully. * @@ -72,6 +73,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } /** * @return (List) list of emails to notify when the run completes successfully. * @@ -93,6 +97,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(JobEmailNotifications defaults) { @@ -101,6 +106,7 @@ public Builder(JobEmailNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -138,6 +144,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -152,6 +167,7 @@ public JobEmailNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskEmailNotifications.java index 8fd77e44..16ad2bbb 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskEmailNotifications.java @@ -35,6 +35,7 @@ public final class JobTaskEmailNotifications { * */ private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; /** * @return (List) list of emails to notify when the run completes successfully. * @@ -72,6 +73,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } /** * @return (List) list of emails to notify when the run completes successfully. 
* @@ -93,6 +97,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(JobTaskEmailNotifications defaults) { @@ -101,6 +106,7 @@ public Builder(JobTaskEmailNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -138,6 +144,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -152,6 +167,7 @@ public JobTaskEmailNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskEmailNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskEmailNotifications.java index 25595b62..f644f012 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskEmailNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskEmailNotifications.java @@ -35,6 +35,7 @@ public final class JobTaskForEachTaskTaskEmailNotifications { * */ private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; /** * @return (List) list of emails to notify when the run completes successfully. * @@ -72,6 +73,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } /** * @return (List) list of emails to notify when the run completes successfully. 
* @@ -93,6 +97,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(JobTaskForEachTaskTaskEmailNotifications defaults) { @@ -101,6 +106,7 @@ public Builder(JobTaskForEachTaskTaskEmailNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -138,6 +144,15 @@ public Builder onStarts(String... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(String... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -152,6 +167,7 @@ public JobTaskForEachTaskTaskEmailNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotifications.java index 33f70b11..ee2bc4fa 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.outputs.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.outputs.JobTaskForEachTaskTaskWebhookNotificationsOnFailure; import com.pulumi.databricks.outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStart; +import com.pulumi.databricks.outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.outputs.JobTaskForEachTaskTaskWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -33,6 +34,7 @@ public final class JobTaskForEachTaskTaskWebhookNotifications { * */ private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; /** * @return (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
* @@ -65,6 +67,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } /** * @return (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * @@ -85,6 +90,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(JobTaskForEachTaskTaskWebhookNotifications defaults) { @@ -92,6 +98,7 @@ public Builder(JobTaskForEachTaskTaskWebhookNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -123,6 +130,15 @@ public Builder onStarts(JobTaskForEachTaskTaskWebhookNotificationsOnStart... onS return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -136,6 +152,7 @@ public JobTaskForEachTaskTaskWebhookNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..76bad452 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * @return ID of the job + * + */ + private String id; + + private JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded() {} + /** + * @return ID of the job + * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + this.id = id; + return this; + } + public JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded build() { + final var _resultValue = new JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(); + _resultValue.id = id; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotifications.java index 9865c9e6..eb924c26 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotifications.java @@ -7,6 +7,7 @@ import 
com.pulumi.databricks.outputs.JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.outputs.JobTaskWebhookNotificationsOnFailure; import com.pulumi.databricks.outputs.JobTaskWebhookNotificationsOnStart; +import com.pulumi.databricks.outputs.JobTaskWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.outputs.JobTaskWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -33,6 +34,7 @@ public final class JobTaskWebhookNotifications { * */ private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; /** * @return (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * @@ -65,6 +67,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } /** * @return (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * @@ -85,6 +90,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(JobTaskWebhookNotifications defaults) { @@ -92,6 +98,7 @@ public Builder(JobTaskWebhookNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -123,6 +130,15 @@ public Builder onStarts(JobTaskWebhookNotificationsOnStart... 
onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(JobTaskWebhookNotificationsOnStreamingBacklogExceeded... onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -136,6 +152,7 @@ public JobTaskWebhookNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..7a4e104f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class JobTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * @return ID of the job + * + */ + private String id; + + private JobTaskWebhookNotificationsOnStreamingBacklogExceeded() {} + /** + * @return ID of the job + * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(JobTaskWebhookNotificationsOnStreamingBacklogExceeded defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("JobTaskWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + this.id = id; + return this; + } + public JobTaskWebhookNotificationsOnStreamingBacklogExceeded build() { + final var _resultValue = new JobTaskWebhookNotificationsOnStreamingBacklogExceeded(); + _resultValue.id = id; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTrigger.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTrigger.java index dbd1eb0f..8888dd14 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTrigger.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTrigger.java @@ -5,6 +5,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.databricks.outputs.JobTriggerFileArrival; +import com.pulumi.databricks.outputs.JobTriggerPeriodic; import com.pulumi.databricks.outputs.JobTriggerTable; import 
com.pulumi.databricks.outputs.JobTriggerTableUpdate; import java.lang.String; @@ -24,6 +25,7 @@ public final class JobTrigger { * */ private @Nullable String pauseStatus; + private @Nullable JobTriggerPeriodic periodic; private @Nullable JobTriggerTable table; private @Nullable JobTriggerTableUpdate tableUpdate; @@ -42,6 +44,9 @@ public Optional fileArrival() { public Optional pauseStatus() { return Optional.ofNullable(this.pauseStatus); } + public Optional periodic() { + return Optional.ofNullable(this.periodic); + } public Optional table() { return Optional.ofNullable(this.table); } @@ -60,6 +65,7 @@ public static Builder builder(JobTrigger defaults) { public static final class Builder { private @Nullable JobTriggerFileArrival fileArrival; private @Nullable String pauseStatus; + private @Nullable JobTriggerPeriodic periodic; private @Nullable JobTriggerTable table; private @Nullable JobTriggerTableUpdate tableUpdate; public Builder() {} @@ -67,6 +73,7 @@ public Builder(JobTrigger defaults) { Objects.requireNonNull(defaults); this.fileArrival = defaults.fileArrival; this.pauseStatus = defaults.pauseStatus; + this.periodic = defaults.periodic; this.table = defaults.table; this.tableUpdate = defaults.tableUpdate; } @@ -84,6 +91,12 @@ public Builder pauseStatus(@Nullable String pauseStatus) { return this; } @CustomType.Setter + public Builder periodic(@Nullable JobTriggerPeriodic periodic) { + + this.periodic = periodic; + return this; + } + @CustomType.Setter public Builder table(@Nullable JobTriggerTable table) { this.table = table; @@ -99,6 +112,7 @@ public JobTrigger build() { final var _resultValue = new JobTrigger(); _resultValue.fileArrival = fileArrival; _resultValue.pauseStatus = pauseStatus; + _resultValue.periodic = periodic; _resultValue.table = table; _resultValue.tableUpdate = tableUpdate; return _resultValue; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTriggerPeriodic.java 
b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTriggerPeriodic.java new file mode 100644 index 00000000..38e2de2e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTriggerPeriodic.java @@ -0,0 +1,66 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class JobTriggerPeriodic { + private Integer interval; + private String unit; + + private JobTriggerPeriodic() {} + public Integer interval() { + return this.interval; + } + public String unit() { + return this.unit; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobTriggerPeriodic defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private Integer interval; + private String unit; + public Builder() {} + public Builder(JobTriggerPeriodic defaults) { + Objects.requireNonNull(defaults); + this.interval = defaults.interval; + this.unit = defaults.unit; + } + + @CustomType.Setter + public Builder interval(Integer interval) { + if (interval == null) { + throw new MissingRequiredPropertyException("JobTriggerPeriodic", "interval"); + } + this.interval = interval; + return this; + } + @CustomType.Setter + public Builder unit(String unit) { + if (unit == null) { + throw new MissingRequiredPropertyException("JobTriggerPeriodic", "unit"); + } + this.unit = unit; + return this; + } + public JobTriggerPeriodic build() { + final var _resultValue = new JobTriggerPeriodic(); + _resultValue.interval = interval; + _resultValue.unit = unit; + return _resultValue; + } + } +} diff --git 
a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotifications.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotifications.java index ca28fab3..0d7c8c98 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotifications.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotifications.java @@ -7,6 +7,7 @@ import com.pulumi.databricks.outputs.JobWebhookNotificationsOnDurationWarningThresholdExceeded; import com.pulumi.databricks.outputs.JobWebhookNotificationsOnFailure; import com.pulumi.databricks.outputs.JobWebhookNotificationsOnStart; +import com.pulumi.databricks.outputs.JobWebhookNotificationsOnStreamingBacklogExceeded; import com.pulumi.databricks.outputs.JobWebhookNotificationsOnSuccess; import java.util.List; import java.util.Objects; @@ -33,6 +34,7 @@ public final class JobWebhookNotifications { * */ private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; /** * @return (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. * @@ -65,6 +67,9 @@ public List onFailures() { public List onStarts() { return this.onStarts == null ? List.of() : this.onStarts; } + public List onStreamingBacklogExceededs() { + return this.onStreamingBacklogExceededs == null ? List.of() : this.onStreamingBacklogExceededs; + } /** * @return (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
* @@ -85,6 +90,7 @@ public static final class Builder { private @Nullable List onDurationWarningThresholdExceededs; private @Nullable List onFailures; private @Nullable List onStarts; + private @Nullable List onStreamingBacklogExceededs; private @Nullable List onSuccesses; public Builder() {} public Builder(JobWebhookNotifications defaults) { @@ -92,6 +98,7 @@ public Builder(JobWebhookNotifications defaults) { this.onDurationWarningThresholdExceededs = defaults.onDurationWarningThresholdExceededs; this.onFailures = defaults.onFailures; this.onStarts = defaults.onStarts; + this.onStreamingBacklogExceededs = defaults.onStreamingBacklogExceededs; this.onSuccesses = defaults.onSuccesses; } @@ -123,6 +130,15 @@ public Builder onStarts(JobWebhookNotificationsOnStart... onStarts) { return onStarts(List.of(onStarts)); } @CustomType.Setter + public Builder onStreamingBacklogExceededs(@Nullable List onStreamingBacklogExceededs) { + + this.onStreamingBacklogExceededs = onStreamingBacklogExceededs; + return this; + } + public Builder onStreamingBacklogExceededs(JobWebhookNotificationsOnStreamingBacklogExceeded... 
onStreamingBacklogExceededs) { + return onStreamingBacklogExceededs(List.of(onStreamingBacklogExceededs)); + } + @CustomType.Setter public Builder onSuccesses(@Nullable List onSuccesses) { this.onSuccesses = onSuccesses; @@ -136,6 +152,7 @@ public JobWebhookNotifications build() { _resultValue.onDurationWarningThresholdExceededs = onDurationWarningThresholdExceededs; _resultValue.onFailures = onFailures; _resultValue.onStarts = onStarts; + _resultValue.onStreamingBacklogExceededs = onStreamingBacklogExceededs; _resultValue.onSuccesses = onSuccesses; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.java new file mode 100644 index 00000000..bbf28b43 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobWebhookNotificationsOnStreamingBacklogExceeded.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class JobWebhookNotificationsOnStreamingBacklogExceeded { + /** + * @return ID of the job + * + */ + private String id; + + private JobWebhookNotificationsOnStreamingBacklogExceeded() {} + /** + * @return ID of the job + * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobWebhookNotificationsOnStreamingBacklogExceeded defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(JobWebhookNotificationsOnStreamingBacklogExceeded defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("JobWebhookNotificationsOnStreamingBacklogExceeded", "id"); + } + this.id = id; + return this; + } + public JobWebhookNotificationsOnStreamingBacklogExceeded build() { + final var _resultValue = new JobWebhookNotificationsOnStreamingBacklogExceeded(); + _resultValue.id = id; + return _resultValue; + } + } +} diff --git a/sdk/nodejs/config/vars.ts b/sdk/nodejs/config/vars.ts index 7ec086c2..cc93460c 100644 --- a/sdk/nodejs/config/vars.ts +++ b/sdk/nodejs/config/vars.ts @@ -207,6 +207,14 @@ Object.defineProperty(exports, "retryTimeoutSeconds", { enumerable: true, }); +export declare const serverlessComputeId: string | undefined; +Object.defineProperty(exports, "serverlessComputeId", { + get() { + return __config.get("serverlessComputeId"); + }, + enumerable: true, +}); + export declare const skipVerify: boolean | undefined; Object.defineProperty(exports, "skipVerify", { get() { diff --git 
a/sdk/nodejs/getAwsAssumeRolePolicy.ts b/sdk/nodejs/getAwsAssumeRolePolicy.ts index fb1dee6b..d1cce78a 100644 --- a/sdk/nodejs/getAwsAssumeRolePolicy.ts +++ b/sdk/nodejs/getAwsAssumeRolePolicy.ts @@ -48,7 +48,7 @@ import * as utilities from "./utilities"; * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). */ @@ -137,7 +137,7 @@ export interface GetAwsAssumeRolePolicyResult { * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.getAwsCrossAccountPolicy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). 
*/ diff --git a/sdk/nodejs/getAwsBucketPolicy.ts b/sdk/nodejs/getAwsBucketPolicy.ts index f6ab6038..1a69cea7 100644 --- a/sdk/nodejs/getAwsBucketPolicy.ts +++ b/sdk/nodejs/getAwsBucketPolicy.ts @@ -28,7 +28,7 @@ export interface GetAwsBucketPolicyArgs { bucket: string; databricksAccountId?: string; /** - * Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket */ databricksE2AccountId?: string; /** @@ -71,7 +71,7 @@ export interface GetAwsBucketPolicyOutputArgs { bucket: pulumi.Input; databricksAccountId?: pulumi.Input; /** - * Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + * Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket */ databricksE2AccountId?: pulumi.Input; /** diff --git a/sdk/nodejs/getAwsCrossAccountPolicy.ts b/sdk/nodejs/getAwsCrossAccountPolicy.ts index 6cc5913c..a9fda4c2 100644 --- a/sdk/nodejs/getAwsCrossAccountPolicy.ts +++ b/sdk/nodejs/getAwsCrossAccountPolicy.ts @@ -24,7 +24,7 @@ import * as utilities from "./utilities"; * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. 
@@ -113,7 +113,7 @@ export interface GetAwsCrossAccountPolicyResult { * * The following resources are used in the same context: * - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * * databricks.getAwsAssumeRolePolicy data to construct the necessary AWS STS assume role policy. * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. diff --git a/sdk/nodejs/getAwsUnityCatalogAssumeRolePolicy.ts b/sdk/nodejs/getAwsUnityCatalogAssumeRolePolicy.ts index 976a45f2..8f1e4db8 100644 --- a/sdk/nodejs/getAwsUnityCatalogAssumeRolePolicy.ts +++ b/sdk/nodejs/getAwsUnityCatalogAssumeRolePolicy.ts @@ -7,7 +7,7 @@ import * as utilities from "./utilities"; /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog assume role policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
* * ## Example Usage * @@ -33,7 +33,7 @@ import * as utilities from "./utilities"; * }); * const metastoreDataAccess = new aws.iam.Role("metastore_data_access", { * name: `${prefix}-uc-access`, - * assumeRolePolicy: passroleForUc.json, + * assumeRolePolicy: thisAwsIamPolicyDocument.json, * managedPolicyArns: [unityMetastore.arn], * }); * ``` @@ -62,7 +62,7 @@ export interface GetAwsUnityCatalogAssumeRolePolicyArgs { */ externalId: string; /** - * The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * The name of the AWS IAM role to be created for Unity Catalog. */ roleName: string; /** @@ -88,7 +88,7 @@ export interface GetAwsUnityCatalogAssumeRolePolicyResult { /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog assume role policy for you. + * This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
* * ## Example Usage * @@ -114,7 +114,7 @@ export interface GetAwsUnityCatalogAssumeRolePolicyResult { * }); * const metastoreDataAccess = new aws.iam.Role("metastore_data_access", { * name: `${prefix}-uc-access`, - * assumeRolePolicy: passroleForUc.json, + * assumeRolePolicy: thisAwsIamPolicyDocument.json, * managedPolicyArns: [unityMetastore.arn], * }); * ``` @@ -136,7 +136,7 @@ export interface GetAwsUnityCatalogAssumeRolePolicyOutputArgs { */ externalId: pulumi.Input; /** - * The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + * The name of the AWS IAM role to be created for Unity Catalog. */ roleName: pulumi.Input; /** diff --git a/sdk/nodejs/getAwsUnityCatalogPolicy.ts b/sdk/nodejs/getAwsUnityCatalogPolicy.ts index c65b49f8..b0e63577 100644 --- a/sdk/nodejs/getAwsUnityCatalogPolicy.ts +++ b/sdk/nodejs/getAwsUnityCatalogPolicy.ts @@ -7,7 +7,7 @@ import * as utilities from "./utilities"; /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog policy for you. 
* * ## Example Usage * @@ -33,7 +33,7 @@ import * as utilities from "./utilities"; * }); * const metastoreDataAccess = new aws.iam.Role("metastore_data_access", { * name: `${prefix}-uc-access`, - * assumeRolePolicy: passroleForUc.json, + * assumeRolePolicy: thisAwsIamPolicyDocument.json, * managedPolicyArns: [unityMetastore.arn], * }); * ``` @@ -91,7 +91,7 @@ export interface GetAwsUnityCatalogPolicyResult { /** * > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. * - * This data source constructs necessary AWS Unity Catalog policy for you. + * This data source constructs the necessary AWS Unity Catalog policy for you. * * ## Example Usage * @@ -117,7 +117,7 @@ export interface GetAwsUnityCatalogPolicyResult { * }); * const metastoreDataAccess = new aws.iam.Role("metastore_data_access", { * name: `${prefix}-uc-access`, - * assumeRolePolicy: passroleForUc.json, + * assumeRolePolicy: thisAwsIamPolicyDocument.json, * managedPolicyArns: [unityMetastore.arn], * }); * ``` diff --git a/sdk/nodejs/getMwsCredentials.ts b/sdk/nodejs/getMwsCredentials.ts index 031a8d6b..a0dbf772 100644 --- a/sdk/nodejs/getMwsCredentials.ts +++ b/sdk/nodejs/getMwsCredentials.ts @@ -32,7 +32,7 @@ import * as utilities from "./utilities"; * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
* * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). */ export function getMwsCredentials(args?: GetMwsCredentialsArgs, opts?: pulumi.InvokeOptions): Promise { args = args || {}; @@ -94,7 +94,7 @@ export interface GetMwsCredentialsResult { * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
*/ export function getMwsCredentialsOutput(args?: GetMwsCredentialsOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { return pulumi.output(args).apply((a: any) => getMwsCredentials(a, opts)) diff --git a/sdk/nodejs/getMwsWorkspaces.ts b/sdk/nodejs/getMwsWorkspaces.ts index 5c349681..8039ccd0 100644 --- a/sdk/nodejs/getMwsWorkspaces.ts +++ b/sdk/nodejs/getMwsWorkspaces.ts @@ -27,7 +27,7 @@ import * as utilities from "./utilities"; * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * * databricks.MetastoreAssignment */ export function getMwsWorkspaces(args?: GetMwsWorkspacesArgs, opts?: pulumi.InvokeOptions): Promise { @@ -85,7 +85,7 @@ export interface GetMwsWorkspacesResult { * * The following resources are used in the same context: * - * * databricks.MwsWorkspaces to manage Databricks E2 Workspaces. + * * databricks.MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * * databricks.MetastoreAssignment */ export function getMwsWorkspacesOutput(args?: GetMwsWorkspacesOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { diff --git a/sdk/nodejs/ipAccessList.ts b/sdk/nodejs/ipAccessList.ts index 4750cb33..90d6e55f 100644 --- a/sdk/nodejs/ipAccessList.ts +++ b/sdk/nodejs/ipAccessList.ts @@ -36,7 +36,7 @@ import * as utilities from "./utilities"; * The following resources are often used in the same context: * * * End to end workspace management guide. - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
* * databricks.MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). * * databricks.Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. diff --git a/sdk/nodejs/mwsCredentials.ts b/sdk/nodejs/mwsCredentials.ts index f8f9fc0d..086675fc 100644 --- a/sdk/nodejs/mwsCredentials.ts +++ b/sdk/nodejs/mwsCredentials.ts @@ -22,7 +22,7 @@ import * as utilities from "./utilities"; * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
* * ## Import * diff --git a/sdk/nodejs/mwsCustomerManagedKeys.ts b/sdk/nodejs/mwsCustomerManagedKeys.ts index f4b103b6..e898d291 100644 --- a/sdk/nodejs/mwsCustomerManagedKeys.ts +++ b/sdk/nodejs/mwsCustomerManagedKeys.ts @@ -217,7 +217,7 @@ import * as utilities from "./utilities"; * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/nodejs/mwsLogDelivery.ts b/sdk/nodejs/mwsLogDelivery.ts index f514de61..7b4a7eda 100644 --- a/sdk/nodejs/mwsLogDelivery.ts +++ b/sdk/nodejs/mwsLogDelivery.ts @@ -60,7 +60,7 @@ import * as utilities from "./utilities"; * * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * * databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
+ * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * @@ -135,7 +135,7 @@ export class MwsLogDelivery extends pulumi.CustomResource { */ public readonly storageConfigurationId!: pulumi.Output; /** - * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. */ public readonly workspaceIdsFilters!: pulumi.Output; @@ -242,7 +242,7 @@ export interface MwsLogDeliveryState { */ storageConfigurationId?: pulumi.Input; /** - * By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. */ workspaceIdsFilters?: pulumi.Input[]>; } @@ -292,7 +292,7 @@ export interface MwsLogDeliveryArgs { */ storageConfigurationId: pulumi.Input; /** - * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. 
If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + * By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. */ workspaceIdsFilters?: pulumi.Input[]>; } diff --git a/sdk/nodejs/mwsNetworks.ts b/sdk/nodejs/mwsNetworks.ts index 8c96d042..45994f15 100644 --- a/sdk/nodejs/mwsNetworks.ts +++ b/sdk/nodejs/mwsNetworks.ts @@ -69,13 +69,13 @@ import * as utilities from "./utilities"; * The following resources are used in the same context: * * * Provisioning Databricks on AWS guide. - * * Provisioning Databricks on AWS with PrivateLink guide. - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * * Provisioning Databricks on AWS with Private Link guide. + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * * Provisioning Databricks on GCP guide. * * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * * databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration. 
* * databricks.MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a databricks.MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html). - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/nodejs/mwsPrivateAccessSettings.ts b/sdk/nodejs/mwsPrivateAccessSettings.ts index d21215f0..35ae5d5b 100644 --- a/sdk/nodejs/mwsPrivateAccessSettings.ts +++ b/sdk/nodejs/mwsPrivateAccessSettings.ts @@ -77,12 +77,12 @@ import * as utilities from "./utilities"; * The following resources are used in the same context: * * * Provisioning Databricks on AWS guide. - * * Provisioning Databricks on AWS with PrivateLink guide. - * * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * * Provisioning Databricks on AWS with Private Link guide. + * * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * * databricks.MwsVpcEndpoint resources with Databricks such that they can be used as part of a databricks.MwsNetworks configuration. * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
- * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). * * ## Import * diff --git a/sdk/nodejs/mwsStorageConfigurations.ts b/sdk/nodejs/mwsStorageConfigurations.ts index 3a70caac..8b3858bd 100644 --- a/sdk/nodejs/mwsStorageConfigurations.ts +++ b/sdk/nodejs/mwsStorageConfigurations.ts @@ -19,12 +19,12 @@ import * as utilities from "./utilities"; * The following resources are used in the same context: * * * Provisioning Databricks on AWS guide. - * * Provisioning Databricks on AWS with PrivateLink guide. + * * Provisioning Databricks on AWS with Private Link guide. * * databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS. * * databricks.MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * * databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * * databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - * * databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * * databricks.MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
* * ## Import * diff --git a/sdk/nodejs/onlineTable.ts b/sdk/nodejs/onlineTable.ts index b96abf02..c05981e8 100644 --- a/sdk/nodejs/onlineTable.ts +++ b/sdk/nodejs/onlineTable.ts @@ -77,6 +77,7 @@ export class OnlineTable extends pulumi.CustomResource { * object describing status of the online table: */ public /*out*/ readonly statuses!: pulumi.Output; + public readonly tableServingUrl!: pulumi.Output; /** * Create a OnlineTable resource with the given unique name, arguments, and options. @@ -94,10 +95,12 @@ export class OnlineTable extends pulumi.CustomResource { resourceInputs["name"] = state ? state.name : undefined; resourceInputs["spec"] = state ? state.spec : undefined; resourceInputs["statuses"] = state ? state.statuses : undefined; + resourceInputs["tableServingUrl"] = state ? state.tableServingUrl : undefined; } else { const args = argsOrState as OnlineTableArgs | undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["spec"] = args ? args.spec : undefined; + resourceInputs["tableServingUrl"] = args ? 
args.tableServingUrl : undefined; resourceInputs["statuses"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); @@ -121,6 +124,7 @@ export interface OnlineTableState { * object describing status of the online table: */ statuses?: pulumi.Input[]>; + tableServingUrl?: pulumi.Input; } /** @@ -135,4 +139,5 @@ export interface OnlineTableArgs { * object containing specification of the online table: */ spec?: pulumi.Input; + tableServingUrl?: pulumi.Input; } diff --git a/sdk/nodejs/provider.ts b/sdk/nodejs/provider.ts index 21c6bc33..ef6eaa6a 100644 --- a/sdk/nodejs/provider.ts +++ b/sdk/nodejs/provider.ts @@ -44,6 +44,7 @@ export class Provider extends pulumi.ProviderResource { public readonly metadataServiceUrl!: pulumi.Output; public readonly password!: pulumi.Output; public readonly profile!: pulumi.Output; + public readonly serverlessComputeId!: pulumi.Output; public readonly token!: pulumi.Output; public readonly username!: pulumi.Output; public readonly warehouseId!: pulumi.Output; @@ -84,6 +85,7 @@ export class Provider extends pulumi.ProviderResource { resourceInputs["profile"] = args ? args.profile : undefined; resourceInputs["rateLimit"] = pulumi.output(args ? args.rateLimit : undefined).apply(JSON.stringify); resourceInputs["retryTimeoutSeconds"] = pulumi.output(args ? args.retryTimeoutSeconds : undefined).apply(JSON.stringify); + resourceInputs["serverlessComputeId"] = args ? args.serverlessComputeId : undefined; resourceInputs["skipVerify"] = pulumi.output(args ? args.skipVerify : undefined).apply(JSON.stringify); resourceInputs["token"] = args?.token ? pulumi.secret(args.token) : undefined; resourceInputs["username"] = args ? 
args.username : undefined; @@ -125,6 +127,7 @@ export interface ProviderArgs { profile?: pulumi.Input; rateLimit?: pulumi.Input; retryTimeoutSeconds?: pulumi.Input; + serverlessComputeId?: pulumi.Input; skipVerify?: pulumi.Input; token?: pulumi.Input; username?: pulumi.Input; diff --git a/sdk/nodejs/sqlPermissions.ts b/sdk/nodejs/sqlPermissions.ts index 1ecb19b9..3b933ee9 100644 --- a/sdk/nodejs/sqlPermissions.ts +++ b/sdk/nodejs/sqlPermissions.ts @@ -110,6 +110,9 @@ export class SqlPermissions extends pulumi.CustomResource { * If this access control for the entire catalog. Defaults to `false`. */ public readonly catalog!: pulumi.Output; + /** + * Id of an existing databricks_cluster, otherwise resource creation will fail. + */ public readonly clusterId!: pulumi.Output; /** * Name of the database. Has default value of `default`. @@ -178,6 +181,9 @@ export interface SqlPermissionsState { * If this access control for the entire catalog. Defaults to `false`. */ catalog?: pulumi.Input; + /** + * Id of an existing databricks_cluster, otherwise resource creation will fail. + */ clusterId?: pulumi.Input; /** * Name of the database. Has default value of `default`. @@ -210,6 +216,9 @@ export interface SqlPermissionsArgs { * If this access control for the entire catalog. Defaults to `false`. */ catalog?: pulumi.Input; + /** + * Id of an existing databricks_cluster, otherwise resource creation will fail. + */ clusterId?: pulumi.Input; /** * Name of the database. Has default value of `default`. diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index 97aafa40..2c56f178 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -1311,6 +1311,7 @@ export interface GetExternalLocationExternalLocationInfo { * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). 
*/ encryptionDetails?: inputs.GetExternalLocationExternalLocationInfoEncryptionDetails; + isolationMode?: string; /** * Unique identifier of the parent Metastore. */ @@ -1371,6 +1372,7 @@ export interface GetExternalLocationExternalLocationInfoArgs { * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). */ encryptionDetails?: pulumi.Input; + isolationMode?: pulumi.Input; /** * Unique identifier of the parent Metastore. */ @@ -1774,6 +1776,7 @@ export interface GetJobJobSettingsSettingsEmailNotifications { onDurationWarningThresholdExceededs?: string[]; onFailures?: string[]; onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; onSuccesses?: string[]; } @@ -1782,6 +1785,7 @@ export interface GetJobJobSettingsSettingsEmailNotificationsArgs { onDurationWarningThresholdExceededs?: pulumi.Input[]>; onFailures?: pulumi.Input[]>; onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; onSuccesses?: pulumi.Input[]>; } @@ -2790,6 +2794,7 @@ export interface GetJobJobSettingsSettingsTaskEmailNotifications { onDurationWarningThresholdExceededs?: string[]; onFailures?: string[]; onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; onSuccesses?: string[]; } @@ -2798,6 +2803,7 @@ export interface GetJobJobSettingsSettingsTaskEmailNotificationsArgs { onDurationWarningThresholdExceededs?: pulumi.Input[]>; onFailures?: pulumi.Input[]>; onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; onSuccesses?: pulumi.Input[]>; } @@ -2920,6 +2926,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications onDurationWarningThresholdExceededs?: string[]; onFailures?: string[]; onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; onSuccesses?: string[]; } @@ -2928,6 +2935,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotificationsA onDurationWarningThresholdExceededs?: pulumi.Input[]>; onFailures?: 
pulumi.Input[]>; onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; onSuccesses?: pulumi.Input[]>; } @@ -3495,6 +3503,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotification onDurationWarningThresholdExceededs?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded[]; onFailures?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure[]; onStarts?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded[]; onSuccesses?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess[]; } @@ -3502,6 +3511,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotification onDurationWarningThresholdExceededs?: pulumi.Input[]>; onFailures?: pulumi.Input[]>; onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; onSuccesses?: pulumi.Input[]>; } @@ -3547,6 +3557,20 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotification id: pulumi.Input; } +export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: string; +} + +export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: pulumi.Input; +} + export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess { /** * the id of databricks.Job if the resource was matched by name. 
@@ -4125,6 +4149,7 @@ export interface GetJobJobSettingsSettingsTaskWebhookNotifications { onDurationWarningThresholdExceededs?: inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded[]; onFailures?: inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailure[]; onStarts?: inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded[]; onSuccesses?: inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess[]; } @@ -4132,6 +4157,7 @@ export interface GetJobJobSettingsSettingsTaskWebhookNotificationsArgs { onDurationWarningThresholdExceededs?: pulumi.Input[]>; onFailures?: pulumi.Input[]>; onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; onSuccesses?: pulumi.Input[]>; } @@ -4177,6 +4203,20 @@ export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArgs { id: pulumi.Input; } +export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: string; +} + +export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: pulumi.Input; +} + export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess { /** * the id of databricks.Job if the resource was matched by name. 
@@ -4233,6 +4273,7 @@ export interface GetJobJobSettingsSettingsWebhookNotifications { onDurationWarningThresholdExceededs?: inputs.GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded[]; onFailures?: inputs.GetJobJobSettingsSettingsWebhookNotificationsOnFailure[]; onStarts?: inputs.GetJobJobSettingsSettingsWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: inputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded[]; onSuccesses?: inputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccess[]; } @@ -4240,6 +4281,7 @@ export interface GetJobJobSettingsSettingsWebhookNotificationsArgs { onDurationWarningThresholdExceededs?: pulumi.Input[]>; onFailures?: pulumi.Input[]>; onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; onSuccesses?: pulumi.Input[]>; } @@ -4285,6 +4327,20 @@ export interface GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs { id: pulumi.Input; } +export interface GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: string; +} + +export interface GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: pulumi.Input; +} + export interface GetJobJobSettingsSettingsWebhookNotificationsOnSuccess { /** * the id of databricks.Job if the resource was matched by name. @@ -4656,6 +4712,7 @@ export interface GetStorageCredentialStorageCredentialInfo { * Unique ID of storage credential. */ id?: string; + isolationMode?: string; /** * Unique identifier of the parent Metastore. */ @@ -4714,6 +4771,7 @@ export interface GetStorageCredentialStorageCredentialInfoArgs { * Unique ID of storage credential. */ id?: pulumi.Input; + isolationMode?: pulumi.Input; /** * Unique identifier of the parent Metastore. 
*/ @@ -5359,6 +5417,7 @@ export interface JobEmailNotifications { * (List) list of emails to notify when the run starts. */ onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; /** * (List) list of emails to notify when the run completes successfully. */ @@ -6200,6 +6259,7 @@ export interface JobTaskEmailNotifications { * (List) list of emails to notify when the run starts. */ onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; /** * (List) list of emails to notify when the run completes successfully. */ @@ -6382,6 +6442,7 @@ export interface JobTaskForEachTaskTaskEmailNotifications { * (List) list of emails to notify when the run starts. */ onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; /** * (List) list of emails to notify when the run completes successfully. */ @@ -6910,6 +6971,7 @@ export interface JobTaskForEachTaskTaskWebhookNotifications { * (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. */ onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. */ @@ -6937,6 +6999,13 @@ export interface JobTaskForEachTaskTaskWebhookNotificationsOnStart { id: pulumi.Input; } +export interface JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * ID of the job + */ + id: pulumi.Input; +} + export interface JobTaskForEachTaskTaskWebhookNotificationsOnSuccess { /** * ID of the job @@ -7466,6 +7535,7 @@ export interface JobTaskWebhookNotifications { * (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. */ onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
*/ @@ -7493,6 +7563,13 @@ export interface JobTaskWebhookNotificationsOnStart { id: pulumi.Input; } +export interface JobTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * ID of the job + */ + id: pulumi.Input; +} + export interface JobTaskWebhookNotificationsOnSuccess { /** * ID of the job @@ -7509,6 +7586,7 @@ export interface JobTrigger { * Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. When the `pauseStatus` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pauseStatus`. */ pauseStatus?: pulumi.Input; + periodic?: pulumi.Input; table?: pulumi.Input; tableUpdate?: pulumi.Input; } @@ -7528,6 +7606,11 @@ export interface JobTriggerFileArrival { waitAfterLastChangeSeconds?: pulumi.Input; } +export interface JobTriggerPeriodic { + interval: pulumi.Input; + unit: pulumi.Input; +} + export interface JobTriggerTable { condition?: pulumi.Input; minTimeBetweenTriggersSeconds?: pulumi.Input; @@ -7559,6 +7642,7 @@ export interface JobWebhookNotifications { * (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. */ onStarts?: pulumi.Input[]>; + onStreamingBacklogExceededs?: pulumi.Input[]>; /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
*/ @@ -7586,6 +7670,13 @@ export interface JobWebhookNotificationsOnStart { id: pulumi.Input; } +export interface JobWebhookNotificationsOnStreamingBacklogExceeded { + /** + * ID of the job + */ + id: pulumi.Input; +} + export interface JobWebhookNotificationsOnSuccess { /** * ID of the job diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index 68b1316c..79f9f96a 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -901,6 +901,7 @@ export interface GetExternalLocationExternalLocationInfo { * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). */ encryptionDetails?: outputs.GetExternalLocationExternalLocationInfoEncryptionDetails; + isolationMode?: string; /** * Unique identifier of the parent Metastore. */ @@ -1120,6 +1121,7 @@ export interface GetJobJobSettingsSettingsEmailNotifications { onDurationWarningThresholdExceededs?: string[]; onFailures?: string[]; onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; onSuccesses?: string[]; } @@ -1628,6 +1630,7 @@ export interface GetJobJobSettingsSettingsTaskEmailNotifications { onDurationWarningThresholdExceededs?: string[]; onFailures?: string[]; onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; onSuccesses?: string[]; } @@ -1693,6 +1696,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications onDurationWarningThresholdExceededs?: string[]; onFailures?: string[]; onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; onSuccesses?: string[]; } @@ -1980,6 +1984,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotification onDurationWarningThresholdExceededs?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded[]; onFailures?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure[]; onStarts?: 
outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded[]; onSuccesses?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess[]; } @@ -2004,6 +2009,13 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotification id: string; } +export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: string; +} + export interface GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess { /** * the id of databricks.Job if the resource was matched by name. @@ -2295,6 +2307,7 @@ export interface GetJobJobSettingsSettingsTaskWebhookNotifications { onDurationWarningThresholdExceededs?: outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded[]; onFailures?: outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailure[]; onStarts?: outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded[]; onSuccesses?: outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess[]; } @@ -2319,6 +2332,13 @@ export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnStart { id: string; } +export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: string; +} + export interface GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess { /** * the id of databricks.Job if the resource was matched by name. 
@@ -2349,6 +2369,7 @@ export interface GetJobJobSettingsSettingsWebhookNotifications { onDurationWarningThresholdExceededs?: outputs.GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded[]; onFailures?: outputs.GetJobJobSettingsSettingsWebhookNotificationsOnFailure[]; onStarts?: outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded[]; onSuccesses?: outputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccess[]; } @@ -2373,6 +2394,13 @@ export interface GetJobJobSettingsSettingsWebhookNotificationsOnStart { id: string; } +export interface GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded { + /** + * the id of databricks.Job if the resource was matched by name. + */ + id: string; +} + export interface GetJobJobSettingsSettingsWebhookNotificationsOnSuccess { /** * the id of databricks.Job if the resource was matched by name. @@ -2582,6 +2610,7 @@ export interface GetStorageCredentialStorageCredentialInfo { * Unique ID of storage credential. */ id?: string; + isolationMode?: string; /** * Unique identifier of the parent Metastore. */ @@ -3011,6 +3040,7 @@ export interface JobEmailNotifications { * (List) list of emails to notify when the run starts. */ onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; /** * (List) list of emails to notify when the run completes successfully. */ @@ -3852,6 +3882,7 @@ export interface JobTaskEmailNotifications { * (List) list of emails to notify when the run starts. */ onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; /** * (List) list of emails to notify when the run completes successfully. */ @@ -4034,6 +4065,7 @@ export interface JobTaskForEachTaskTaskEmailNotifications { * (List) list of emails to notify when the run starts. 
*/ onStarts?: string[]; + onStreamingBacklogExceededs?: string[]; /** * (List) list of emails to notify when the run completes successfully. */ @@ -4562,6 +4594,7 @@ export interface JobTaskForEachTaskTaskWebhookNotifications { * (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. */ onStarts?: outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded[]; /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. */ @@ -4589,6 +4622,13 @@ export interface JobTaskForEachTaskTaskWebhookNotificationsOnStart { id: string; } +export interface JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * ID of the job + */ + id: string; +} + export interface JobTaskForEachTaskTaskWebhookNotificationsOnSuccess { /** * ID of the job @@ -5118,6 +5158,7 @@ export interface JobTaskWebhookNotifications { * (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. */ onStarts?: outputs.JobTaskWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: outputs.JobTaskWebhookNotificationsOnStreamingBacklogExceeded[]; /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. */ @@ -5145,6 +5186,13 @@ export interface JobTaskWebhookNotificationsOnStart { id: string; } +export interface JobTaskWebhookNotificationsOnStreamingBacklogExceeded { + /** + * ID of the job + */ + id: string; +} + export interface JobTaskWebhookNotificationsOnSuccess { /** * ID of the job @@ -5161,6 +5209,7 @@ export interface JobTrigger { * Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. 
When the `pauseStatus` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pauseStatus`. */ pauseStatus?: string; + periodic?: outputs.JobTriggerPeriodic; table?: outputs.JobTriggerTable; tableUpdate?: outputs.JobTriggerTableUpdate; } @@ -5180,6 +5229,11 @@ export interface JobTriggerFileArrival { waitAfterLastChangeSeconds?: number; } +export interface JobTriggerPeriodic { + interval: number; + unit: string; +} + export interface JobTriggerTable { condition?: string; minTimeBetweenTriggersSeconds?: number; @@ -5211,6 +5265,7 @@ export interface JobWebhookNotifications { * (List) list of notification IDs to call when the run starts. A maximum of 3 destinations can be specified. */ onStarts?: outputs.JobWebhookNotificationsOnStart[]; + onStreamingBacklogExceededs?: outputs.JobWebhookNotificationsOnStreamingBacklogExceeded[]; /** * (List) list of notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified. 
*/ @@ -5238,6 +5293,13 @@ export interface JobWebhookNotificationsOnStart { id: string; } +export interface JobWebhookNotificationsOnStreamingBacklogExceeded { + /** + * ID of the job + */ + id: string; +} + export interface JobWebhookNotificationsOnSuccess { /** * ID of the job diff --git a/sdk/python/pulumi_databricks/_inputs.py b/sdk/python/pulumi_databricks/_inputs.py index 99ce0050..b784a5f8 100644 --- a/sdk/python/pulumi_databricks/_inputs.py +++ b/sdk/python/pulumi_databricks/_inputs.py @@ -210,6 +210,7 @@ 'JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs', 'JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs', 'JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs', + 'JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs', 'JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs', 'JobTaskHealthArgs', 'JobTaskHealthRuleArgs', @@ -264,15 +265,18 @@ 'JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs', 'JobTaskWebhookNotificationsOnFailureArgs', 'JobTaskWebhookNotificationsOnStartArgs', + 'JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs', 'JobTaskWebhookNotificationsOnSuccessArgs', 'JobTriggerArgs', 'JobTriggerFileArrivalArgs', + 'JobTriggerPeriodicArgs', 'JobTriggerTableArgs', 'JobTriggerTableUpdateArgs', 'JobWebhookNotificationsArgs', 'JobWebhookNotificationsOnDurationWarningThresholdExceededArgs', 'JobWebhookNotificationsOnFailureArgs', 'JobWebhookNotificationsOnStartArgs', + 'JobWebhookNotificationsOnStreamingBacklogExceededArgs', 'JobWebhookNotificationsOnSuccessArgs', 'LakehouseMonitorCustomMetricArgs', 'LakehouseMonitorDataClassificationConfigArgs', @@ -607,6 +611,7 @@ 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs', 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArgs', 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArgs', + 
'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs', 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs', 'GetJobJobSettingsSettingsTaskHealthArgs', 'GetJobJobSettingsSettingsTaskHealthRuleArgs', @@ -655,6 +660,7 @@ 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs', 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArgs', 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArgs', + 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs', 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArgs', 'GetJobJobSettingsSettingsTriggerArgs', 'GetJobJobSettingsSettingsTriggerFileArrivalArgs', @@ -663,6 +669,7 @@ 'GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceededArgs', 'GetJobJobSettingsSettingsWebhookNotificationsOnFailureArgs', 'GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs', + 'GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs', 'GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArgs', 'GetMetastoreMetastoreInfoArgs', 'GetMlflowExperimentTagArgs', @@ -3315,6 +3322,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_failures: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_starts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + on_streaming_backlog_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_successes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
@@ -3333,6 +3341,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -3386,6 +3396,15 @@ def on_starts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def on_starts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: @@ -8039,6 +8058,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_failures: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_starts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + on_streaming_backlog_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_successes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
@@ -8057,6 +8077,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -8110,6 +8132,15 @@ def on_starts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def on_starts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: @@ -8811,6 +8842,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_failures: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_starts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + on_streaming_backlog_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, on_successes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
@@ -8829,6 +8861,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -8882,6 +8916,15 @@ def on_starts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def on_starts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: @@ -11559,6 +11602,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs']]]] = None, on_failures: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnFailureArgs']]]] = None, on_starts: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs']]]] = None, + on_streaming_backlog_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]]] = None, on_successes: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs']]]] = None): """ :param 
pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs']]] on_duration_warning_threshold_exceededs: (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the `RUN_DURATION_SECONDS` metric in the `health` block. @@ -11576,6 +11620,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -11619,6 +11665,15 @@ def on_starts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEa def on_starts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnStartArgs']]]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs']]]]: @@ -11698,6 +11753,28 @@ def id(self, value: pulumi.Input[str]): pulumi.set(self, "id", value) +@pulumi.input_type +class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs: + def __init__(__self__, *, + id: pulumi.Input[str]): + """ + 
:param pulumi.Input[str] id: ID of the job + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> pulumi.Input[str]: + """ + ID of the job + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: pulumi.Input[str]): + pulumi.set(self, "id", value) + + @pulumi.input_type class JobTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs: def __init__(__self__, *, @@ -14384,6 +14461,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs']]]] = None, on_failures: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnFailureArgs']]]] = None, on_starts: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnStartArgs']]]] = None, + on_streaming_backlog_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]]] = None, on_successes: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnSuccessArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs']]] on_duration_warning_threshold_exceededs: (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the `RUN_DURATION_SECONDS` metric in the `health` block. 
@@ -14401,6 +14479,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -14444,6 +14524,15 @@ def on_starts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebho def on_starts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnStartArgs']]]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobTaskWebhookNotificationsOnSuccessArgs']]]]: @@ -14523,6 +14612,28 @@ def id(self, value: pulumi.Input[str]): pulumi.set(self, "id", value) +@pulumi.input_type +class JobTaskWebhookNotificationsOnStreamingBacklogExceededArgs: + def __init__(__self__, *, + id: pulumi.Input[str]): + """ + :param pulumi.Input[str] id: ID of the job + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> pulumi.Input[str]: + """ + ID of the job + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: pulumi.Input[str]): + pulumi.set(self, "id", value) + + @pulumi.input_type class JobTaskWebhookNotificationsOnSuccessArgs: def __init__(__self__, *, @@ 
-14550,6 +14661,7 @@ class JobTriggerArgs: def __init__(__self__, *, file_arrival: Optional[pulumi.Input['JobTriggerFileArrivalArgs']] = None, pause_status: Optional[pulumi.Input[str]] = None, + periodic: Optional[pulumi.Input['JobTriggerPeriodicArgs']] = None, table: Optional[pulumi.Input['JobTriggerTableArgs']] = None, table_update: Optional[pulumi.Input['JobTriggerTableUpdateArgs']] = None): """ @@ -14560,6 +14672,8 @@ def __init__(__self__, *, pulumi.set(__self__, "file_arrival", file_arrival) if pause_status is not None: pulumi.set(__self__, "pause_status", pause_status) + if periodic is not None: + pulumi.set(__self__, "periodic", periodic) if table is not None: pulumi.set(__self__, "table", table) if table_update is not None: @@ -14589,6 +14703,15 @@ def pause_status(self) -> Optional[pulumi.Input[str]]: def pause_status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "pause_status", value) + @property + @pulumi.getter + def periodic(self) -> Optional[pulumi.Input['JobTriggerPeriodicArgs']]: + return pulumi.get(self, "periodic") + + @periodic.setter + def periodic(self, value: Optional[pulumi.Input['JobTriggerPeriodicArgs']]): + pulumi.set(self, "periodic", value) + @property @pulumi.getter def table(self) -> Optional[pulumi.Input['JobTriggerTableArgs']]: @@ -14662,6 +14785,33 @@ def wait_after_last_change_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "wait_after_last_change_seconds", value) +@pulumi.input_type +class JobTriggerPeriodicArgs: + def __init__(__self__, *, + interval: pulumi.Input[int], + unit: pulumi.Input[str]): + pulumi.set(__self__, "interval", interval) + pulumi.set(__self__, "unit", unit) + + @property + @pulumi.getter + def interval(self) -> pulumi.Input[int]: + return pulumi.get(self, "interval") + + @interval.setter + def interval(self, value: pulumi.Input[int]): + pulumi.set(self, "interval", value) + + @property + @pulumi.getter + def unit(self) -> pulumi.Input[str]: + return pulumi.get(self, "unit") 
+ + @unit.setter + def unit(self, value: pulumi.Input[str]): + pulumi.set(self, "unit", value) + + @pulumi.input_type class JobTriggerTableArgs: def __init__(__self__, *, @@ -14773,6 +14923,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnDurationWarningThresholdExceededArgs']]]] = None, on_failures: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnFailureArgs']]]] = None, on_starts: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnStartArgs']]]] = None, + on_streaming_backlog_exceededs: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnStreamingBacklogExceededArgs']]]] = None, on_successes: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnSuccessArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnDurationWarningThresholdExceededArgs']]] on_duration_warning_threshold_exceededs: (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the `RUN_DURATION_SECONDS` metric in the `health` block. 
@@ -14790,6 +14941,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -14833,6 +14986,15 @@ def on_starts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNo def on_starts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnStartArgs']]]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnStreamingBacklogExceededArgs']]]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnStreamingBacklogExceededArgs']]]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobWebhookNotificationsOnSuccessArgs']]]]: @@ -14912,6 +15074,28 @@ def id(self, value: pulumi.Input[str]): pulumi.set(self, "id", value) +@pulumi.input_type +class JobWebhookNotificationsOnStreamingBacklogExceededArgs: + def __init__(__self__, *, + id: pulumi.Input[str]): + """ + :param pulumi.Input[str] id: ID of the job + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> pulumi.Input[str]: + """ + ID of the job + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: pulumi.Input[str]): + pulumi.set(self, "id", value) + + @pulumi.input_type class JobWebhookNotificationsOnSuccessArgs: def __init__(__self__, *, @@ -24887,6 +25071,7 @@ def 
__init__(__self__, *, credential_id: Optional[str] = None, credential_name: Optional[str] = None, encryption_details: Optional['GetExternalLocationExternalLocationInfoEncryptionDetailsArgs'] = None, + isolation_mode: Optional[str] = None, metastore_id: Optional[str] = None, name: Optional[str] = None, owner: Optional[str] = None, @@ -24926,6 +25111,8 @@ def __init__(__self__, *, pulumi.set(__self__, "credential_name", credential_name) if encryption_details is not None: pulumi.set(__self__, "encryption_details", encryption_details) + if isolation_mode is not None: + pulumi.set(__self__, "isolation_mode", isolation_mode) if metastore_id is not None: pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: @@ -25034,6 +25221,15 @@ def encryption_details(self) -> Optional['GetExternalLocationExternalLocationInf def encryption_details(self, value: Optional['GetExternalLocationExternalLocationInfoEncryptionDetailsArgs']): pulumi.set(self, "encryption_details", value) + @property + @pulumi.getter(name="isolationMode") + def isolation_mode(self) -> Optional[str]: + return pulumi.get(self, "isolation_mode") + + @isolation_mode.setter + def isolation_mode(self, value: Optional[str]): + pulumi.set(self, "isolation_mode", value) + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> Optional[str]: @@ -26503,6 +26699,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): if no_alert_for_skipped_runs is not None: pulumi.set(__self__, "no_alert_for_skipped_runs", no_alert_for_skipped_runs) @@ -26512,6 +26709,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not 
None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -26551,6 +26750,15 @@ def on_starts(self) -> Optional[Sequence[str]]: def on_starts(self, value: Optional[Sequence[str]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[Sequence[str]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -30261,6 +30469,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): if no_alert_for_skipped_runs is not None: pulumi.set(__self__, "no_alert_for_skipped_runs", no_alert_for_skipped_runs) @@ -30270,6 +30479,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -30309,6 +30520,15 @@ def on_starts(self) -> Optional[Sequence[str]]: def on_starts(self, value: Optional[Sequence[str]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, 
"on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[Sequence[str]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -30846,6 +31066,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): if no_alert_for_skipped_runs is not None: pulumi.set(__self__, "no_alert_for_skipped_runs", no_alert_for_skipped_runs) @@ -30855,6 +31076,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -30894,6 +31117,15 @@ def on_starts(self) -> Optional[Sequence[str]]: def on_starts(self, value: Optional[Sequence[str]]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[Sequence[str]]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -32897,6 +33129,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs']] = 
None, on_failures: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureArgs']] = None, on_starts: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArgs']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs']] = None, on_successes: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs']] = None): if on_duration_warning_threshold_exceededs is not None: pulumi.set(__self__, "on_duration_warning_threshold_exceededs", on_duration_warning_threshold_exceededs) @@ -32904,6 +33137,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -32934,6 +33169,15 @@ def on_starts(self) -> Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTa def on_starts(self, value: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartArgs']]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> 
Optional[Sequence['GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs']]: @@ -33010,6 +33254,28 @@ def id(self, value: str): pulumi.set(self, "id", value) +@pulumi.input_type +class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededArgs: + def __init__(__self__, *, + id: str): + """ + :param str id: the id of Job if the resource was matched by name. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + the id of Job if the resource was matched by name. + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: str): + pulumi.set(self, "id", value) + + @pulumi.input_type class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessArgs: def __init__(__self__, *, @@ -35025,6 +35291,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs']] = None, on_failures: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureArgs']] = None, on_starts: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArgs']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs']] = None, on_successes: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArgs']] = None): if on_duration_warning_threshold_exceededs is not None: pulumi.set(__self__, "on_duration_warning_threshold_exceededs", on_duration_warning_threshold_exceededs) @@ -35032,6 +35299,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: 
pulumi.set(__self__, "on_successes", on_successes) @@ -35062,6 +35331,15 @@ def on_starts(self) -> Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNo def on_starts(self, value: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartArgs']]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def on_streaming_backlog_exceededs(self, value: Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs']]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArgs']]: @@ -35138,6 +35416,28 @@ def id(self, value: str): pulumi.set(self, "id", value) +@pulumi.input_type +class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededArgs: + def __init__(__self__, *, + id: str): + """ + :param str id: the id of Job if the resource was matched by name. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + the id of Job if the resource was matched by name. 
+ """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: str): + pulumi.set(self, "id", value) + + @pulumi.input_type class GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessArgs: def __init__(__self__, *, @@ -35299,6 +35599,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceededArgs']] = None, on_failures: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnFailureArgs']] = None, on_starts: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs']] = None, on_successes: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArgs']] = None): if on_duration_warning_threshold_exceededs is not None: pulumi.set(__self__, "on_duration_warning_threshold_exceededs", on_duration_warning_threshold_exceededs) @@ -35306,6 +35607,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -35336,6 +35639,15 @@ def on_starts(self) -> Optional[Sequence['GetJobJobSettingsSettingsWebhookNotifi def on_starts(self, value: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnStartArgs']]): pulumi.set(self, "on_starts", value) + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + + @on_streaming_backlog_exceededs.setter + def 
on_streaming_backlog_exceededs(self, value: Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs']]): + pulumi.set(self, "on_streaming_backlog_exceededs", value) + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArgs']]: @@ -35412,6 +35724,28 @@ def id(self, value: str): pulumi.set(self, "id", value) +@pulumi.input_type +class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededArgs: + def __init__(__self__, *, + id: str): + """ + :param str id: the id of Job if the resource was matched by name. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + the id of Job if the resource was matched by name. + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: str): + pulumi.set(self, "id", value) + + @pulumi.input_type class GetJobJobSettingsSettingsWebhookNotificationsOnSuccessArgs: def __init__(__self__, *, @@ -36426,6 +36760,7 @@ def __init__(__self__, *, created_by: Optional[str] = None, databricks_gcp_service_account: Optional['GetStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccountArgs'] = None, id: Optional[str] = None, + isolation_mode: Optional[str] = None, metastore_id: Optional[str] = None, name: Optional[str] = None, owner: Optional[str] = None, @@ -36466,6 +36801,8 @@ def __init__(__self__, *, pulumi.set(__self__, "databricks_gcp_service_account", databricks_gcp_service_account) if id is not None: pulumi.set(__self__, "id", id) + if isolation_mode is not None: + pulumi.set(__self__, "isolation_mode", isolation_mode) if metastore_id is not None: pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: @@ -36583,6 +36920,15 @@ def id(self) -> Optional[str]: def id(self, value: Optional[str]): pulumi.set(self, "id", value) + @property + @pulumi.getter(name="isolationMode") + def isolation_mode(self) -> 
Optional[str]: + return pulumi.get(self, "isolation_mode") + + @isolation_mode.setter + def isolation_mode(self, value: Optional[str]): + pulumi.set(self, "isolation_mode", value) + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> Optional[str]: diff --git a/sdk/python/pulumi_databricks/config/__init__.pyi b/sdk/python/pulumi_databricks/config/__init__.pyi index 89961853..434a8a4a 100644 --- a/sdk/python/pulumi_databricks/config/__init__.pyi +++ b/sdk/python/pulumi_databricks/config/__init__.pyi @@ -59,6 +59,8 @@ rateLimit: Optional[int] retryTimeoutSeconds: Optional[int] +serverlessComputeId: Optional[str] + skipVerify: Optional[bool] token: Optional[str] diff --git a/sdk/python/pulumi_databricks/config/vars.py b/sdk/python/pulumi_databricks/config/vars.py index 39f54a4a..a94c1803 100644 --- a/sdk/python/pulumi_databricks/config/vars.py +++ b/sdk/python/pulumi_databricks/config/vars.py @@ -115,6 +115,10 @@ def rate_limit(self) -> Optional[int]: def retry_timeout_seconds(self) -> Optional[int]: return __config__.get_int('retryTimeoutSeconds') + @property + def serverless_compute_id(self) -> Optional[str]: + return __config__.get('serverlessComputeId') + @property def skip_verify(self) -> Optional[bool]: return __config__.get_bool('skipVerify') diff --git a/sdk/python/pulumi_databricks/get_aws_assume_role_policy.py b/sdk/python/pulumi_databricks/get_aws_assume_role_policy.py index fde8b81b..74c8a498 100644 --- a/sdk/python/pulumi_databricks/get_aws_assume_role_policy.py +++ b/sdk/python/pulumi_databricks/get_aws_assume_role_policy.py @@ -125,7 +125,7 @@ def get_aws_assume_role_policy(databricks_account_id: Optional[str] = None, The following resources are used in the same context: - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * get_aws_bucket_policy data to configure a simple 
access policy for AWS S3 buckets, so that Databricks can access data in it. * get_aws_cross_account_policy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). @@ -191,7 +191,7 @@ def get_aws_assume_role_policy_output(databricks_account_id: Optional[pulumi.Inp The following resources are used in the same context: - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * get_aws_bucket_policy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * get_aws_cross_account_policy data to construct the necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). diff --git a/sdk/python/pulumi_databricks/get_aws_bucket_policy.py b/sdk/python/pulumi_databricks/get_aws_bucket_policy.py index f56c33eb..19064cdc 100644 --- a/sdk/python/pulumi_databricks/get_aws_bucket_policy.py +++ b/sdk/python/pulumi_databricks/get_aws_bucket_policy.py @@ -102,7 +102,7 @@ def get_aws_bucket_policy(bucket: Optional[str] = None, :param str bucket: AWS S3 Bucket name for which to generate the policy document. - :param str databricks_e2_account_id: Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + :param str databricks_e2_account_id: Your Databricks account ID. 
Used to generate restrictive IAM policies that will increase the security of your root bucket :param str full_access_role: Data access role that can have full access for this bucket """ __args__ = dict() @@ -133,7 +133,7 @@ def get_aws_bucket_policy_output(bucket: Optional[pulumi.Input[str]] = None, :param str bucket: AWS S3 Bucket name for which to generate the policy document. - :param str databricks_e2_account_id: Your Databricks E2 account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket + :param str databricks_e2_account_id: Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket :param str full_access_role: Data access role that can have full access for this bucket """ ... diff --git a/sdk/python/pulumi_databricks/get_aws_cross_account_policy.py b/sdk/python/pulumi_databricks/get_aws_cross_account_policy.py index 17a318b9..987557dd 100644 --- a/sdk/python/pulumi_databricks/get_aws_cross_account_policy.py +++ b/sdk/python/pulumi_databricks/get_aws_cross_account_policy.py @@ -137,7 +137,7 @@ def get_aws_cross_account_policy(aws_account_id: Optional[str] = None, The following resources are used in the same context: - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * get_aws_assume_role_policy data to construct the necessary AWS STS assume role policy. * get_aws_bucket_policy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * InstanceProfile to manage AWS EC2 instance profiles that users can launch Cluster and access data, like databricks_mount. 
@@ -200,7 +200,7 @@ def get_aws_cross_account_policy_output(aws_account_id: Optional[pulumi.Input[Op The following resources are used in the same context: - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide * get_aws_assume_role_policy data to construct the necessary AWS STS assume role policy. * get_aws_bucket_policy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in it. * InstanceProfile to manage AWS EC2 instance profiles that users can launch Cluster and access data, like databricks_mount. diff --git a/sdk/python/pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py b/sdk/python/pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py index a901eaee..2f1d75fb 100644 --- a/sdk/python/pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py +++ b/sdk/python/pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py @@ -97,7 +97,7 @@ def get_aws_unity_catalog_assume_role_policy(aws_account_id: Optional[str] = Non """ > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. - This data source constructs necessary AWS Unity Catalog assume role policy for you. + This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
## Example Usage @@ -118,14 +118,14 @@ def get_aws_unity_catalog_assume_role_policy(aws_account_id: Optional[str] = Non policy=this.json) metastore_data_access = aws.iam.Role("metastore_data_access", name=f"{prefix}-uc-access", - assume_role_policy=passrole_for_uc["json"], + assume_role_policy=this_aws_iam_policy_document["json"], managed_policy_arns=[unity_metastore.arn]) ``` :param str aws_account_id: The Account ID of the current AWS account (not your Databricks account). :param str external_id: The storage credential external id. - :param str role_name: The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + :param str role_name: The name of the AWS IAM role to be created for Unity Catalog. :param str unity_catalog_iam_arn: The Databricks Unity Catalog IAM Role ARN. Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` """ __args__ = dict() @@ -154,7 +154,7 @@ def get_aws_unity_catalog_assume_role_policy_output(aws_account_id: Optional[pul """ > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. - This data source constructs necessary AWS Unity Catalog assume role policy for you. + This data source constructs the necessary AWS Unity Catalog assume role policy for you. 
## Example Usage @@ -175,14 +175,14 @@ def get_aws_unity_catalog_assume_role_policy_output(aws_account_id: Optional[pul policy=this.json) metastore_data_access = aws.iam.Role("metastore_data_access", name=f"{prefix}-uc-access", - assume_role_policy=passrole_for_uc["json"], + assume_role_policy=this_aws_iam_policy_document["json"], managed_policy_arns=[unity_metastore.arn]) ``` :param str aws_account_id: The Account ID of the current AWS account (not your Databricks account). :param str external_id: The storage credential external id. - :param str role_name: The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). + :param str role_name: The name of the AWS IAM role to be created for Unity Catalog. :param str unity_catalog_iam_arn: The Databricks Unity Catalog IAM Role ARN. Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` """ ... diff --git a/sdk/python/pulumi_databricks/get_aws_unity_catalog_policy.py b/sdk/python/pulumi_databricks/get_aws_unity_catalog_policy.py index 52c337f1..e0822093 100644 --- a/sdk/python/pulumi_databricks/get_aws_unity_catalog_policy.py +++ b/sdk/python/pulumi_databricks/get_aws_unity_catalog_policy.py @@ -100,7 +100,7 @@ def get_aws_unity_catalog_policy(aws_account_id: Optional[str] = None, """ > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. - This data source constructs necessary AWS Unity Catalog policy for you. + This data source constructs the necessary AWS Unity Catalog policy for you. 
## Example Usage @@ -121,7 +121,7 @@ def get_aws_unity_catalog_policy(aws_account_id: Optional[str] = None, policy=this.json) metastore_data_access = aws.iam.Role("metastore_data_access", name=f"{prefix}-uc-access", - assume_role_policy=passrole_for_uc["json"], + assume_role_policy=this_aws_iam_policy_document["json"], managed_policy_arns=[unity_metastore.arn]) ``` @@ -157,7 +157,7 @@ def get_aws_unity_catalog_policy_output(aws_account_id: Optional[pulumi.Input[st """ > **Note** This resource has an evolving API, which may change in future versions of the provider. Please always consult [latest documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws) in case of any questions. - This data source constructs necessary AWS Unity Catalog policy for you. + This data source constructs the necessary AWS Unity Catalog policy for you. ## Example Usage @@ -178,7 +178,7 @@ def get_aws_unity_catalog_policy_output(aws_account_id: Optional[pulumi.Input[st policy=this.json) metastore_data_access = aws.iam.Role("metastore_data_access", name=f"{prefix}-uc-access", - assume_role_policy=passrole_for_uc["json"], + assume_role_policy=this_aws_iam_policy_document["json"], managed_policy_arns=[unity_metastore.arn]) ``` diff --git a/sdk/python/pulumi_databricks/get_mws_credentials.py b/sdk/python/pulumi_databricks/get_mws_credentials.py index 4d6e86f5..b37b1074 100644 --- a/sdk/python/pulumi_databricks/get_mws_credentials.py +++ b/sdk/python/pulumi_databricks/get_mws_credentials.py @@ -86,7 +86,7 @@ def get_mws_credentials(ids: Optional[Mapping[str, Any]] = None, * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). 
* MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). :param Mapping[str, Any] ids: name-to-id map for all of the credentials in the account @@ -132,7 +132,7 @@ def get_mws_credentials_output(ids: Optional[pulumi.Input[Optional[Mapping[str, * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
:param Mapping[str, Any] ids: name-to-id map for all of the credentials in the account diff --git a/sdk/python/pulumi_databricks/get_mws_workspaces.py b/sdk/python/pulumi_databricks/get_mws_workspaces.py index 921d18b2..782d0603 100644 --- a/sdk/python/pulumi_databricks/get_mws_workspaces.py +++ b/sdk/python/pulumi_databricks/get_mws_workspaces.py @@ -81,7 +81,7 @@ def get_mws_workspaces(ids: Optional[Mapping[str, Any]] = None, The following resources are used in the same context: - * MwsWorkspaces to manage Databricks E2 Workspaces. + * MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * MetastoreAssignment @@ -123,7 +123,7 @@ def get_mws_workspaces_output(ids: Optional[pulumi.Input[Optional[Mapping[str, A The following resources are used in the same context: - * MwsWorkspaces to manage Databricks E2 Workspaces. + * MwsWorkspaces to manage Databricks Workspaces on AWS and GCP. * MetastoreAssignment diff --git a/sdk/python/pulumi_databricks/ip_access_list.py b/sdk/python/pulumi_databricks/ip_access_list.py index 98514abb..0830c948 100644 --- a/sdk/python/pulumi_databricks/ip_access_list.py +++ b/sdk/python/pulumi_databricks/ip_access_list.py @@ -192,7 +192,7 @@ def __init__(__self__, The following resources are often used in the same context: * End to end workspace management guide. - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
* MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). * Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. @@ -251,7 +251,7 @@ def __init__(__self__, The following resources are often used in the same context: * End to end workspace management guide. - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsPrivateAccessSettings to create a [Private Access Setting](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html#step-5-create-a-private-access-settings-configuration-using-the-databricks-account-api) that can be used as part of a MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). * Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. 
diff --git a/sdk/python/pulumi_databricks/mws_credentials.py b/sdk/python/pulumi_databricks/mws_credentials.py index debb7b84..6d3affca 100644 --- a/sdk/python/pulumi_databricks/mws_credentials.py +++ b/sdk/python/pulumi_databricks/mws_credentials.py @@ -223,7 +223,7 @@ def __init__(__self__, * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -264,7 +264,7 @@ def __init__(__self__, * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
## Import diff --git a/sdk/python/pulumi_databricks/mws_customer_managed_keys.py b/sdk/python/pulumi_databricks/mws_customer_managed_keys.py index b36913be..493a32a7 100644 --- a/sdk/python/pulumi_databricks/mws_customer_managed_keys.py +++ b/sdk/python/pulumi_databricks/mws_customer_managed_keys.py @@ -440,7 +440,7 @@ def __init__(__self__, * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -664,7 +664,7 @@ def __init__(__self__, * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
## Import diff --git a/sdk/python/pulumi_databricks/mws_log_delivery.py b/sdk/python/pulumi_databricks/mws_log_delivery.py index 4f0793fc..c641f890 100644 --- a/sdk/python/pulumi_databricks/mws_log_delivery.py +++ b/sdk/python/pulumi_databricks/mws_log_delivery.py @@ -37,7 +37,7 @@ def __init__(__self__, *, :param pulumi.Input[str] delivery_path_prefix: Defaults to empty, which means that logs are delivered to the root of the bucket. The value must be a valid S3 object key. It must not start or end with a slash character. :param pulumi.Input[str] delivery_start_time: The optional start month and year for delivery, specified in YYYY-MM format. Defaults to current year and month. Usage is not available before 2019-03. :param pulumi.Input[str] status: Status of log delivery configuration. Set to ENABLED or DISABLED. Defaults to ENABLED. This is the only field you can update. - :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. 
You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. """ pulumi.set(__self__, "account_id", account_id) pulumi.set(__self__, "credentials_id", credentials_id) @@ -181,7 +181,7 @@ def status(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="workspaceIdsFilters") def workspace_ids_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]: """ - By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. 
""" return pulumi.get(self, "workspace_ids_filters") @@ -216,7 +216,7 @@ def __init__(__self__, *, :param pulumi.Input[str] output_format: The file type of log delivery. Currently `CSV` (for `BILLABLE_USAGE`) and `JSON` (for `AUDIT_LOGS`) are supported. :param pulumi.Input[str] status: Status of log delivery configuration. Set to ENABLED or DISABLED. Defaults to ENABLED. This is the only field you can update. :param pulumi.Input[str] storage_configuration_id: The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. - :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. 
If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. """ if account_id is not None: pulumi.set(__self__, "account_id", account_id) @@ -365,7 +365,7 @@ def storage_configuration_id(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="workspaceIdsFilters") def workspace_ids_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]: """ - By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. """ return pulumi.get(self, "workspace_ids_filters") @@ -445,7 +445,7 @@ def __init__(__self__, * MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. 
* MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -463,7 +463,7 @@ def __init__(__self__, :param pulumi.Input[str] output_format: The file type of log delivery. Currently `CSV` (for `BILLABLE_USAGE`) and `JSON` (for `AUDIT_LOGS`) are supported. :param pulumi.Input[str] status: Status of log delivery configuration. Set to ENABLED or DISABLED. Defaults to ENABLED. This is the only field you can update. :param pulumi.Input[str] storage_configuration_id: The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. - :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. """ ... @overload @@ -525,7 +525,7 @@ def __init__(__self__, * MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. * MwsStorageConfigurations to configure root bucket new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -625,7 +625,7 @@ def get(resource_name: str, :param pulumi.Input[str] output_format: The file type of log delivery. Currently `CSV` (for `BILLABLE_USAGE`) and `JSON` (for `AUDIT_LOGS`) are supported. :param pulumi.Input[str] status: Status of log delivery configuration. Set to ENABLED or DISABLED. Defaults to ENABLED. This is the only field you can update. :param pulumi.Input[str] storage_configuration_id: The ID for a Databricks storage configuration that represents the S3 bucket with bucket policy as described in the main billable usage documentation page. - :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. 
If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + :param pulumi.Input[Sequence[pulumi.Input[int]]] workspace_ids_filters: By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -728,7 +728,7 @@ def storage_configuration_id(self) -> pulumi.Output[str]: @pulumi.getter(name="workspaceIdsFilters") def workspace_ids_filters(self) -> pulumi.Output[Optional[Sequence[int]]]: """ - By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. 
If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. + By default, this log configuration applies to all workspaces associated with your account ID. If your account is on the multitenant version of the platform or on a select custom plan that allows multiple workspaces per account, you may have multiple workspaces associated with your account ID. You can optionally set the field as mentioned earlier to an array of workspace IDs. If you plan to use different log delivery configurations for several workspaces, set this explicitly rather than leaving it blank. If you leave this blank and your account ID gets additional workspaces in the future, this configuration will also apply to the new workspaces. """ return pulumi.get(self, "workspace_ids_filters") diff --git a/sdk/python/pulumi_databricks/mws_networks.py b/sdk/python/pulumi_databricks/mws_networks.py index 229c2259..308c0aa1 100644 --- a/sdk/python/pulumi_databricks/mws_networks.py +++ b/sdk/python/pulumi_databricks/mws_networks.py @@ -476,13 +476,13 @@ def __init__(__self__, The following resources are used in the same context: * Provisioning Databricks on AWS guide. - * Provisioning Databricks on AWS with PrivateLink guide. - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * Provisioning Databricks on AWS with Private Link guide. + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * Provisioning Databricks on GCP guide. * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * MwsVpcEndpoint resources with Databricks such that they can be used as part of a MwsNetworks configuration. 
* MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html). - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -570,13 +570,13 @@ def __init__(__self__, The following resources are used in the same context: * Provisioning Databricks on AWS guide. - * Provisioning Databricks on AWS with PrivateLink guide. - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * Provisioning Databricks on AWS with Private Link guide. + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * Provisioning Databricks on GCP guide. * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * MwsVpcEndpoint resources with Databricks such that they can be used as part of a MwsNetworks configuration. * MwsPrivateAccessSettings to create a Private Access Setting that can be used as part of a MwsWorkspaces resource to create a [Databricks Workspace that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) or [GCP Private Service Connect](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/private-service-connect.html). - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
+ * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import diff --git a/sdk/python/pulumi_databricks/mws_private_access_settings.py b/sdk/python/pulumi_databricks/mws_private_access_settings.py index 8be56ac9..c30fa1ed 100644 --- a/sdk/python/pulumi_databricks/mws_private_access_settings.py +++ b/sdk/python/pulumi_databricks/mws_private_access_settings.py @@ -330,12 +330,12 @@ def __init__(__self__, The following resources are used in the same context: * Provisioning Databricks on AWS guide. - * Provisioning Databricks on AWS with PrivateLink guide. - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * Provisioning Databricks on AWS with Private Link guide. + * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * MwsVpcEndpoint resources with Databricks such that they can be used as part of a MwsNetworks configuration. * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -424,12 +424,12 @@ def __init__(__self__, The following resources are used in the same context: * Provisioning Databricks on AWS guide. - * Provisioning Databricks on AWS with PrivateLink guide. - * Provisioning AWS Databricks E2 with a Hub & Spoke firewall for data exfiltration protection guide. + * Provisioning Databricks on AWS with Private Link guide. 
+ * Provisioning AWS Databricks workspaces with a Hub & Spoke firewall for data exfiltration protection guide. * Provisioning Databricks workspaces on GCP with Private Service Connect guide. * MwsVpcEndpoint resources with Databricks such that they can be used as part of a MwsNetworks configuration. * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import diff --git a/sdk/python/pulumi_databricks/mws_storage_configurations.py b/sdk/python/pulumi_databricks/mws_storage_configurations.py index edb544f8..e48acf67 100644 --- a/sdk/python/pulumi_databricks/mws_storage_configurations.py +++ b/sdk/python/pulumi_databricks/mws_storage_configurations.py @@ -148,12 +148,12 @@ def __init__(__self__, The following resources are used in the same context: * Provisioning Databricks on AWS guide. - * Provisioning Databricks on AWS with PrivateLink guide. + * Provisioning Databricks on AWS with Private Link guide. * MwsCredentials to configure the cross-account role for creation of new workspaces within AWS. * MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. 
- * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). ## Import @@ -183,12 +183,12 @@ def __init__(__self__, The following resources are used in the same context: * Provisioning Databricks on AWS guide. - * Provisioning Databricks on AWS with PrivateLink guide. + * Provisioning Databricks on AWS with Private Link guide. * MwsCredentials to configure the cross-account role for creation of new workspaces within AWS. * MwsCustomerManagedKeys to configure KMS keys for new workspaces within AWS. * MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). * MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. - * MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). + * MwsWorkspaces to set up [AWS and GCP workspaces](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1). 
## Import diff --git a/sdk/python/pulumi_databricks/online_table.py b/sdk/python/pulumi_databricks/online_table.py index d6246f77..930be14e 100644 --- a/sdk/python/pulumi_databricks/online_table.py +++ b/sdk/python/pulumi_databricks/online_table.py @@ -17,7 +17,8 @@ class OnlineTableArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, - spec: Optional[pulumi.Input['OnlineTableSpecArgs']] = None): + spec: Optional[pulumi.Input['OnlineTableSpecArgs']] = None, + table_serving_url: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a OnlineTable resource. :param pulumi.Input[str] name: 3-level name of the Online Table to create. @@ -27,6 +28,8 @@ def __init__(__self__, *, pulumi.set(__self__, "name", name) if spec is not None: pulumi.set(__self__, "spec", spec) + if table_serving_url is not None: + pulumi.set(__self__, "table_serving_url", table_serving_url) @property @pulumi.getter @@ -52,13 +55,23 @@ def spec(self) -> Optional[pulumi.Input['OnlineTableSpecArgs']]: def spec(self, value: Optional[pulumi.Input['OnlineTableSpecArgs']]): pulumi.set(self, "spec", value) + @property + @pulumi.getter(name="tableServingUrl") + def table_serving_url(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "table_serving_url") + + @table_serving_url.setter + def table_serving_url(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "table_serving_url", value) + @pulumi.input_type class _OnlineTableState: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, spec: Optional[pulumi.Input['OnlineTableSpecArgs']] = None, - statuses: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineTableStatusArgs']]]] = None): + statuses: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineTableStatusArgs']]]] = None, + table_serving_url: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering OnlineTable resources. 
:param pulumi.Input[str] name: 3-level name of the Online Table to create. @@ -71,6 +84,8 @@ def __init__(__self__, *, pulumi.set(__self__, "spec", spec) if statuses is not None: pulumi.set(__self__, "statuses", statuses) + if table_serving_url is not None: + pulumi.set(__self__, "table_serving_url", table_serving_url) @property @pulumi.getter @@ -108,6 +123,15 @@ def statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OnlineTableSt def statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineTableStatusArgs']]]]): pulumi.set(self, "statuses", value) + @property + @pulumi.getter(name="tableServingUrl") + def table_serving_url(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "table_serving_url") + + @table_serving_url.setter + def table_serving_url(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "table_serving_url", value) + class OnlineTable(pulumi.CustomResource): @overload @@ -116,6 +140,7 @@ def __init__(__self__, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, spec: Optional[pulumi.Input[pulumi.InputType['OnlineTableSpecArgs']]] = None, + table_serving_url: Optional[pulumi.Input[str]] = None, __props__=None): """ > **Note** This resource could be only used on Unity Catalog-enabled workspace! 
@@ -205,6 +230,7 @@ def _internal_init(__self__, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, spec: Optional[pulumi.Input[pulumi.InputType['OnlineTableSpecArgs']]] = None, + table_serving_url: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -216,6 +242,7 @@ def _internal_init(__self__, __props__.__dict__["name"] = name __props__.__dict__["spec"] = spec + __props__.__dict__["table_serving_url"] = table_serving_url __props__.__dict__["statuses"] = None super(OnlineTable, __self__).__init__( 'databricks:index/onlineTable:OnlineTable', @@ -229,7 +256,8 @@ def get(resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, name: Optional[pulumi.Input[str]] = None, spec: Optional[pulumi.Input[pulumi.InputType['OnlineTableSpecArgs']]] = None, - statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OnlineTableStatusArgs']]]]] = None) -> 'OnlineTable': + statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OnlineTableStatusArgs']]]]] = None, + table_serving_url: Optional[pulumi.Input[str]] = None) -> 'OnlineTable': """ Get an existing OnlineTable resource's state with the given name, id, and optional extra properties used to qualify the lookup. 
@@ -248,6 +276,7 @@ def get(resource_name: str, __props__.__dict__["name"] = name __props__.__dict__["spec"] = spec __props__.__dict__["statuses"] = statuses + __props__.__dict__["table_serving_url"] = table_serving_url return OnlineTable(resource_name, opts=opts, __props__=__props__) @property @@ -274,3 +303,8 @@ def statuses(self) -> pulumi.Output[Sequence['outputs.OnlineTableStatus']]: """ return pulumi.get(self, "statuses") + @property + @pulumi.getter(name="tableServingUrl") + def table_serving_url(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "table_serving_url") + diff --git a/sdk/python/pulumi_databricks/outputs.py b/sdk/python/pulumi_databricks/outputs.py index fb17740a..4c519364 100644 --- a/sdk/python/pulumi_databricks/outputs.py +++ b/sdk/python/pulumi_databricks/outputs.py @@ -211,6 +211,7 @@ 'JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded', 'JobTaskForEachTaskTaskWebhookNotificationsOnFailure', 'JobTaskForEachTaskTaskWebhookNotificationsOnStart', + 'JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded', 'JobTaskForEachTaskTaskWebhookNotificationsOnSuccess', 'JobTaskHealth', 'JobTaskHealthRule', @@ -265,15 +266,18 @@ 'JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded', 'JobTaskWebhookNotificationsOnFailure', 'JobTaskWebhookNotificationsOnStart', + 'JobTaskWebhookNotificationsOnStreamingBacklogExceeded', 'JobTaskWebhookNotificationsOnSuccess', 'JobTrigger', 'JobTriggerFileArrival', + 'JobTriggerPeriodic', 'JobTriggerTable', 'JobTriggerTableUpdate', 'JobWebhookNotifications', 'JobWebhookNotificationsOnDurationWarningThresholdExceeded', 'JobWebhookNotificationsOnFailure', 'JobWebhookNotificationsOnStart', + 'JobWebhookNotificationsOnStreamingBacklogExceeded', 'JobWebhookNotificationsOnSuccess', 'LakehouseMonitorCustomMetric', 'LakehouseMonitorDataClassificationConfig', @@ -609,6 +613,7 @@ 
'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededResult', 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureResult', 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartResult', + 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult', 'GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessResult', 'GetJobJobSettingsSettingsTaskHealthResult', 'GetJobJobSettingsSettingsTaskHealthRuleResult', @@ -657,6 +662,7 @@ 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceededResult', 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureResult', 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartResult', + 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult', 'GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessResult', 'GetJobJobSettingsSettingsTriggerResult', 'GetJobJobSettingsSettingsTriggerFileArrivalResult', @@ -665,6 +671,7 @@ 'GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceededResult', 'GetJobJobSettingsSettingsWebhookNotificationsOnFailureResult', 'GetJobJobSettingsSettingsWebhookNotificationsOnStartResult', + 'GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult', 'GetJobJobSettingsSettingsWebhookNotificationsOnSuccessResult', 'GetMetastoreMetastoreInfoResult', 'GetMlflowExperimentTagResult', @@ -3327,6 +3334,8 @@ def __key_warning(key: str): suggest = "on_failures" elif key == "onStarts": suggest = "on_starts" + elif key == "onStreamingBacklogExceededs": + suggest = "on_streaming_backlog_exceededs" elif key == "onSuccesses": suggest = "on_successes" @@ -3346,6 +3355,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + 
on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): """ :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). @@ -3364,6 +3374,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -3401,6 +3413,11 @@ def on_starts(self) -> Optional[Sequence[str]]: """ return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -7705,6 +7722,8 @@ def __key_warning(key: str): suggest = "on_failures" elif key == "onStarts": suggest = "on_starts" + elif key == "onStreamingBacklogExceededs": + suggest = "on_streaming_backlog_exceededs" elif key == "onSuccesses": suggest = "on_successes" @@ -7724,6 +7743,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): """ :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
@@ -7742,6 +7762,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -7779,6 +7801,11 @@ def on_starts(self) -> Optional[Sequence[str]]: """ return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -8413,6 +8440,8 @@ def __key_warning(key: str): suggest = "on_failures" elif key == "onStarts": suggest = "on_starts" + elif key == "onStreamingBacklogExceededs": + suggest = "on_streaming_backlog_exceededs" elif key == "onSuccesses": suggest = "on_successes" @@ -8432,6 +8461,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): """ :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
@@ -8450,6 +8480,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -8487,6 +8519,11 @@ def on_starts(self) -> Optional[Sequence[str]]: """ return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -11019,6 +11056,8 @@ def __key_warning(key: str): suggest = "on_failures" elif key == "onStarts": suggest = "on_starts" + elif key == "onStreamingBacklogExceededs": + suggest = "on_streaming_backlog_exceededs" elif key == "onSuccesses": suggest = "on_successes" @@ -11037,6 +11076,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded']] = None, on_failures: Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnFailure']] = None, on_starts: Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStart']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded']] = None, on_successes: Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnSuccess']] = None): """ :param Sequence['JobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs'] on_duration_warning_threshold_exceededs: (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the `RUN_DURATION_SECONDS` metric in the 
`health` block. @@ -11054,6 +11094,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -11085,6 +11127,11 @@ def on_starts(self) -> Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookN """ return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['outputs.JobTaskForEachTaskTaskWebhookNotificationsOnSuccess']]: @@ -11148,6 +11195,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class JobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: ID of the job + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + ID of the job + """ + return pulumi.get(self, "id") + + @pulumi.output_type class JobTaskForEachTaskTaskWebhookNotificationsOnSuccess(dict): def __init__(__self__, *, @@ -13689,6 +13754,8 @@ def __key_warning(key: str): suggest = "on_failures" elif key == "onStarts": suggest = "on_starts" + elif key == "onStreamingBacklogExceededs": + suggest = "on_streaming_backlog_exceededs" elif key == "onSuccesses": suggest = "on_successes" @@ -13707,6 +13774,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['outputs.JobTaskWebhookNotificationsOnDurationWarningThresholdExceeded']] = None, on_failures: 
Optional[Sequence['outputs.JobTaskWebhookNotificationsOnFailure']] = None, on_starts: Optional[Sequence['outputs.JobTaskWebhookNotificationsOnStart']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['outputs.JobTaskWebhookNotificationsOnStreamingBacklogExceeded']] = None, on_successes: Optional[Sequence['outputs.JobTaskWebhookNotificationsOnSuccess']] = None): """ :param Sequence['JobTaskWebhookNotificationsOnDurationWarningThresholdExceededArgs'] on_duration_warning_threshold_exceededs: (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the `RUN_DURATION_SECONDS` metric in the `health` block. @@ -13724,6 +13792,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -13755,6 +13825,11 @@ def on_starts(self) -> Optional[Sequence['outputs.JobTaskWebhookNotificationsOnS """ return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['outputs.JobTaskWebhookNotificationsOnStreamingBacklogExceeded']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['outputs.JobTaskWebhookNotificationsOnSuccess']]: @@ -13818,6 +13893,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class JobTaskWebhookNotificationsOnStreamingBacklogExceeded(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: ID of the job + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + ID of the job + """ + return pulumi.get(self, "id") + + 
@pulumi.output_type class JobTaskWebhookNotificationsOnSuccess(dict): def __init__(__self__, *, @@ -13862,6 +13955,7 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, file_arrival: Optional['outputs.JobTriggerFileArrival'] = None, pause_status: Optional[str] = None, + periodic: Optional['outputs.JobTriggerPeriodic'] = None, table: Optional['outputs.JobTriggerTable'] = None, table_update: Optional['outputs.JobTriggerTableUpdate'] = None): """ @@ -13872,6 +13966,8 @@ def __init__(__self__, *, pulumi.set(__self__, "file_arrival", file_arrival) if pause_status is not None: pulumi.set(__self__, "pause_status", pause_status) + if periodic is not None: + pulumi.set(__self__, "periodic", periodic) if table is not None: pulumi.set(__self__, "table", table) if table_update is not None: @@ -13893,6 +13989,11 @@ def pause_status(self) -> Optional[str]: """ return pulumi.get(self, "pause_status") + @property + @pulumi.getter + def periodic(self) -> Optional['outputs.JobTriggerPeriodic']: + return pulumi.get(self, "periodic") + @property @pulumi.getter def table(self) -> Optional['outputs.JobTriggerTable']: @@ -13965,6 +14066,25 @@ def wait_after_last_change_seconds(self) -> Optional[int]: return pulumi.get(self, "wait_after_last_change_seconds") +@pulumi.output_type +class JobTriggerPeriodic(dict): + def __init__(__self__, *, + interval: int, + unit: str): + pulumi.set(__self__, "interval", interval) + pulumi.set(__self__, "unit", unit) + + @property + @pulumi.getter + def interval(self) -> int: + return pulumi.get(self, "interval") + + @property + @pulumi.getter + def unit(self) -> str: + return pulumi.get(self, "unit") + + @pulumi.output_type class JobTriggerTable(dict): @staticmethod @@ -14091,6 +14211,8 @@ def __key_warning(key: str): suggest = "on_failures" elif key == "onStarts": suggest = "on_starts" + elif key == "onStreamingBacklogExceededs": + suggest = "on_streaming_backlog_exceededs" elif key == "onSuccesses": suggest = "on_successes" @@ 
-14109,6 +14231,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['outputs.JobWebhookNotificationsOnDurationWarningThresholdExceeded']] = None, on_failures: Optional[Sequence['outputs.JobWebhookNotificationsOnFailure']] = None, on_starts: Optional[Sequence['outputs.JobWebhookNotificationsOnStart']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['outputs.JobWebhookNotificationsOnStreamingBacklogExceeded']] = None, on_successes: Optional[Sequence['outputs.JobWebhookNotificationsOnSuccess']] = None): """ :param Sequence['JobWebhookNotificationsOnDurationWarningThresholdExceededArgs'] on_duration_warning_threshold_exceededs: (List) list of notification IDs to call when the duration of a run exceeds the threshold specified by the `RUN_DURATION_SECONDS` metric in the `health` block. @@ -14126,6 +14249,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -14157,6 +14282,11 @@ def on_starts(self) -> Optional[Sequence['outputs.JobWebhookNotificationsOnStart """ return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['outputs.JobWebhookNotificationsOnStreamingBacklogExceeded']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['outputs.JobWebhookNotificationsOnSuccess']]: @@ -14220,6 +14350,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class JobWebhookNotificationsOnStreamingBacklogExceeded(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: ID of the 
job + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + ID of the job + """ + return pulumi.get(self, "id") + + @pulumi.output_type class JobWebhookNotificationsOnSuccess(dict): def __init__(__self__, *, @@ -23737,6 +23885,7 @@ def __init__(__self__, *, credential_id: Optional[str] = None, credential_name: Optional[str] = None, encryption_details: Optional['outputs.GetExternalLocationExternalLocationInfoEncryptionDetailsResult'] = None, + isolation_mode: Optional[str] = None, metastore_id: Optional[str] = None, name: Optional[str] = None, owner: Optional[str] = None, @@ -23776,6 +23925,8 @@ def __init__(__self__, *, pulumi.set(__self__, "credential_name", credential_name) if encryption_details is not None: pulumi.set(__self__, "encryption_details", encryption_details) + if isolation_mode is not None: + pulumi.set(__self__, "isolation_mode", isolation_mode) if metastore_id is not None: pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: @@ -23852,6 +24003,11 @@ def encryption_details(self) -> Optional['outputs.GetExternalLocationExternalLoc """ return pulumi.get(self, "encryption_details") + @property + @pulumi.getter(name="isolationMode") + def isolation_mode(self) -> Optional[str]: + return pulumi.get(self, "isolation_mode") + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> Optional[str]: @@ -24869,6 +25025,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): if no_alert_for_skipped_runs is not None: pulumi.set(__self__, "no_alert_for_skipped_runs", no_alert_for_skipped_runs) @@ -24878,6 +25035,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, 
"on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -24901,6 +25060,11 @@ def on_failures(self) -> Optional[Sequence[str]]: def on_starts(self) -> Optional[Sequence[str]]: return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -27471,6 +27635,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): if no_alert_for_skipped_runs is not None: pulumi.set(__self__, "no_alert_for_skipped_runs", no_alert_for_skipped_runs) @@ -27480,6 +27645,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -27503,6 +27670,11 @@ def on_failures(self) -> Optional[Sequence[str]]: def on_starts(self) -> Optional[Sequence[str]]: return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -27868,6 
+28040,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence[str]] = None, on_failures: Optional[Sequence[str]] = None, on_starts: Optional[Sequence[str]] = None, + on_streaming_backlog_exceededs: Optional[Sequence[str]] = None, on_successes: Optional[Sequence[str]] = None): if no_alert_for_skipped_runs is not None: pulumi.set(__self__, "no_alert_for_skipped_runs", no_alert_for_skipped_runs) @@ -27877,6 +28050,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -27900,6 +28075,11 @@ def on_failures(self) -> Optional[Sequence[str]]: def on_starts(self) -> Optional[Sequence[str]]: return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence[str]]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence[str]]: @@ -29295,6 +29475,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceededResult']] = None, on_failures: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailureResult']] = None, on_starts: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartResult']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult']] = None, on_successes: 
Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessResult']] = None): if on_duration_warning_threshold_exceededs is not None: pulumi.set(__self__, "on_duration_warning_threshold_exceededs", on_duration_warning_threshold_exceededs) @@ -29302,6 +29483,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -29320,6 +29503,11 @@ def on_failures(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTas def on_starts(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStartResult']]: return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessResult']]: @@ -29380,6 +29568,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceededResult(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: the id of Job if the resource was matched by name. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + the id of Job if the resource was matched by name. 
+ """ + return pulumi.get(self, "id") + + @pulumi.output_type class GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccessResult(dict): def __init__(__self__, *, @@ -30787,6 +30993,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceededResult']] = None, on_failures: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnFailureResult']] = None, on_starts: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartResult']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult']] = None, on_successes: Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessResult']] = None): if on_duration_warning_threshold_exceededs is not None: pulumi.set(__self__, "on_duration_warning_threshold_exceededs", on_duration_warning_threshold_exceededs) @@ -30794,6 +31001,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: pulumi.set(__self__, "on_successes", on_successes) @@ -30812,6 +31021,11 @@ def on_failures(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTas def on_starts(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStartResult']]: return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + 
@property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessResult']]: @@ -30872,6 +31086,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class GetJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceededResult(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: the id of Job if the resource was matched by name. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + the id of Job if the resource was matched by name. + """ + return pulumi.get(self, "id") + + @pulumi.output_type class GetJobJobSettingsSettingsTaskWebhookNotificationsOnSuccessResult(dict): def __init__(__self__, *, @@ -30989,6 +31221,7 @@ def __init__(__self__, *, on_duration_warning_threshold_exceededs: Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceededResult']] = None, on_failures: Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnFailureResult']] = None, on_starts: Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStartResult']] = None, + on_streaming_backlog_exceededs: Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult']] = None, on_successes: Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccessResult']] = None): if on_duration_warning_threshold_exceededs is not None: pulumi.set(__self__, "on_duration_warning_threshold_exceededs", on_duration_warning_threshold_exceededs) @@ -30996,6 +31229,8 @@ def __init__(__self__, *, pulumi.set(__self__, "on_failures", on_failures) if on_starts is not None: pulumi.set(__self__, "on_starts", on_starts) + if on_streaming_backlog_exceededs is not None: + pulumi.set(__self__, "on_streaming_backlog_exceededs", on_streaming_backlog_exceededs) if on_successes is not None: 
pulumi.set(__self__, "on_successes", on_successes) @@ -31014,6 +31249,11 @@ def on_failures(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsWeb def on_starts(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStartResult']]: return pulumi.get(self, "on_starts") + @property + @pulumi.getter(name="onStreamingBacklogExceededs") + def on_streaming_backlog_exceededs(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult']]: + return pulumi.get(self, "on_streaming_backlog_exceededs") + @property @pulumi.getter(name="onSuccesses") def on_successes(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsWebhookNotificationsOnSuccessResult']]: @@ -31074,6 +31314,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class GetJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceededResult(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: the id of Job if the resource was matched by name. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + the id of Job if the resource was matched by name. 
+ """ + return pulumi.get(self, "id") + + @pulumi.output_type class GetJobJobSettingsSettingsWebhookNotificationsOnSuccessResult(dict): def __init__(__self__, *, @@ -31831,6 +32089,7 @@ def __init__(__self__, *, created_by: Optional[str] = None, databricks_gcp_service_account: Optional['outputs.GetStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccountResult'] = None, id: Optional[str] = None, + isolation_mode: Optional[str] = None, metastore_id: Optional[str] = None, name: Optional[str] = None, owner: Optional[str] = None, @@ -31871,6 +32130,8 @@ def __init__(__self__, *, pulumi.set(__self__, "databricks_gcp_service_account", databricks_gcp_service_account) if id is not None: pulumi.set(__self__, "id", id) + if isolation_mode is not None: + pulumi.set(__self__, "isolation_mode", isolation_mode) if metastore_id is not None: pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: @@ -31952,6 +32213,11 @@ def id(self) -> Optional[str]: """ return pulumi.get(self, "id") + @property + @pulumi.getter(name="isolationMode") + def isolation_mode(self) -> Optional[str]: + return pulumi.get(self, "isolation_mode") + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> Optional[str]: diff --git a/sdk/python/pulumi_databricks/provider.py b/sdk/python/pulumi_databricks/provider.py index 48130e91..85edd417 100644 --- a/sdk/python/pulumi_databricks/provider.py +++ b/sdk/python/pulumi_databricks/provider.py @@ -39,6 +39,7 @@ def __init__(__self__, *, profile: Optional[pulumi.Input[str]] = None, rate_limit: Optional[pulumi.Input[int]] = None, retry_timeout_seconds: Optional[pulumi.Input[int]] = None, + serverless_compute_id: Optional[pulumi.Input[str]] = None, skip_verify: Optional[pulumi.Input[bool]] = None, token: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None, @@ -96,6 +97,8 @@ def __init__(__self__, *, pulumi.set(__self__, "rate_limit", rate_limit) if retry_timeout_seconds is not None: 
pulumi.set(__self__, "retry_timeout_seconds", retry_timeout_seconds) + if serverless_compute_id is not None: + pulumi.set(__self__, "serverless_compute_id", serverless_compute_id) if skip_verify is not None: pulumi.set(__self__, "skip_verify", skip_verify) if token is not None: @@ -330,6 +333,15 @@ def retry_timeout_seconds(self) -> Optional[pulumi.Input[int]]: def retry_timeout_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "retry_timeout_seconds", value) + @property + @pulumi.getter(name="serverlessComputeId") + def serverless_compute_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "serverless_compute_id") + + @serverless_compute_id.setter + def serverless_compute_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "serverless_compute_id", value) + @property @pulumi.getter(name="skipVerify") def skip_verify(self) -> Optional[pulumi.Input[bool]]: @@ -397,6 +409,7 @@ def __init__(__self__, profile: Optional[pulumi.Input[str]] = None, rate_limit: Optional[pulumi.Input[int]] = None, retry_timeout_seconds: Optional[pulumi.Input[int]] = None, + serverless_compute_id: Optional[pulumi.Input[str]] = None, skip_verify: Optional[pulumi.Input[bool]] = None, token: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None, @@ -463,6 +476,7 @@ def _internal_init(__self__, profile: Optional[pulumi.Input[str]] = None, rate_limit: Optional[pulumi.Input[int]] = None, retry_timeout_seconds: Optional[pulumi.Input[int]] = None, + serverless_compute_id: Optional[pulumi.Input[str]] = None, skip_verify: Optional[pulumi.Input[bool]] = None, token: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None, @@ -501,6 +515,7 @@ def _internal_init(__self__, __props__.__dict__["profile"] = profile __props__.__dict__["rate_limit"] = pulumi.Output.from_input(rate_limit).apply(pulumi.runtime.to_json) if rate_limit is not None else None __props__.__dict__["retry_timeout_seconds"] = 
pulumi.Output.from_input(retry_timeout_seconds).apply(pulumi.runtime.to_json) if retry_timeout_seconds is not None else None + __props__.__dict__["serverless_compute_id"] = serverless_compute_id __props__.__dict__["skip_verify"] = pulumi.Output.from_input(skip_verify).apply(pulumi.runtime.to_json) if skip_verify is not None else None __props__.__dict__["token"] = None if token is None else pulumi.Output.secret(token) __props__.__dict__["username"] = username @@ -608,6 +623,11 @@ def password(self) -> pulumi.Output[Optional[str]]: def profile(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "profile") + @property + @pulumi.getter(name="serverlessComputeId") + def serverless_compute_id(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "serverless_compute_id") + @property @pulumi.getter def token(self) -> pulumi.Output[Optional[str]]: diff --git a/sdk/python/pulumi_databricks/sql_permissions.py b/sdk/python/pulumi_databricks/sql_permissions.py index 03760ad8..203c2f26 100644 --- a/sdk/python/pulumi_databricks/sql_permissions.py +++ b/sdk/python/pulumi_databricks/sql_permissions.py @@ -29,6 +29,7 @@ def __init__(__self__, *, :param pulumi.Input[bool] anonymous_function: If this access control for using anonymous function. Defaults to `false`. :param pulumi.Input[bool] any_file: If this access control for reading/writing any file. Defaults to `false`. :param pulumi.Input[bool] catalog: If this access control for the entire catalog. Defaults to `false`. + :param pulumi.Input[str] cluster_id: Id of an existing databricks_cluster, otherwise resource creation will fail. :param pulumi.Input[str] database: Name of the database. Has default value of `default`. :param pulumi.Input[str] table: Name of the table. Can be combined with `database`. :param pulumi.Input[str] view: Name of the view. Can be combined with `database`. 
@@ -89,6 +90,9 @@ def catalog(self, value: Optional[pulumi.Input[bool]]): @property @pulumi.getter(name="clusterId") def cluster_id(self) -> Optional[pulumi.Input[str]]: + """ + Id of an existing databricks_cluster, otherwise resource creation will fail. + """ return pulumi.get(self, "cluster_id") @cluster_id.setter @@ -157,6 +161,7 @@ def __init__(__self__, *, :param pulumi.Input[bool] anonymous_function: If this access control for using anonymous function. Defaults to `false`. :param pulumi.Input[bool] any_file: If this access control for reading/writing any file. Defaults to `false`. :param pulumi.Input[bool] catalog: If this access control for the entire catalog. Defaults to `false`. + :param pulumi.Input[str] cluster_id: Id of an existing databricks_cluster, otherwise resource creation will fail. :param pulumi.Input[str] database: Name of the database. Has default value of `default`. :param pulumi.Input[str] table: Name of the table. Can be combined with `database`. :param pulumi.Input[str] view: Name of the view. Can be combined with `database`. @@ -217,6 +222,9 @@ def catalog(self, value: Optional[pulumi.Input[bool]]): @property @pulumi.getter(name="clusterId") def cluster_id(self) -> Optional[pulumi.Input[str]]: + """ + Id of an existing databricks_cluster, otherwise resource creation will fail. + """ return pulumi.get(self, "cluster_id") @cluster_id.setter @@ -351,6 +359,7 @@ def __init__(__self__, :param pulumi.Input[bool] anonymous_function: If this access control for using anonymous function. Defaults to `false`. :param pulumi.Input[bool] any_file: If this access control for reading/writing any file. Defaults to `false`. :param pulumi.Input[bool] catalog: If this access control for the entire catalog. Defaults to `false`. + :param pulumi.Input[str] cluster_id: Id of an existing databricks_cluster, otherwise resource creation will fail. :param pulumi.Input[str] database: Name of the database. Has default value of `default`. 
:param pulumi.Input[str] table: Name of the table. Can be combined with `database`. :param pulumi.Input[str] view: Name of the view. Can be combined with `database`. @@ -492,6 +501,7 @@ def get(resource_name: str, :param pulumi.Input[bool] anonymous_function: If this access control for using anonymous function. Defaults to `false`. :param pulumi.Input[bool] any_file: If this access control for reading/writing any file. Defaults to `false`. :param pulumi.Input[bool] catalog: If this access control for the entire catalog. Defaults to `false`. + :param pulumi.Input[str] cluster_id: Id of an existing databricks_cluster, otherwise resource creation will fail. :param pulumi.Input[str] database: Name of the database. Has default value of `default`. :param pulumi.Input[str] table: Name of the table. Can be combined with `database`. :param pulumi.Input[str] view: Name of the view. Can be combined with `database`. @@ -537,6 +547,9 @@ def catalog(self) -> pulumi.Output[Optional[bool]]: @property @pulumi.getter(name="clusterId") def cluster_id(self) -> pulumi.Output[str]: + """ + Id of an existing databricks_cluster, otherwise resource creation will fail. + """ return pulumi.get(self, "cluster_id") @property