From b58f1e3c91ab7c50ea979f48aeff822afd396f31 Mon Sep 17 00:00:00 2001 From: Pulumi Bot <30351955+pulumi-bot@users.noreply.github.com> Date: Thu, 11 Jan 2024 21:46:09 -0800 Subject: [PATCH] Upgrade terraform-provider-databricks to v1.34.0 (#310) This PR was generated via `$ upgrade-provider pulumi/pulumi-databricks --kind=all --target-bridge-version=latest`. --- - Upgrading terraform-provider-databricks from 1.33.0 to 1.34.0. Fixes #309 --- .../bridge-metadata.json | 109 +- .../pulumi-resource-databricks/schema.json | 613 +++++++- provider/go.mod | 4 +- provider/go.sum | 8 +- sdk/dotnet/AccessControlRuleSet.cs | 2 + sdk/dotnet/Connection.cs | 2 + sdk/dotnet/DefaultNamespaceSetting.cs | 2 + sdk/dotnet/Directory.cs | 12 + sdk/dotnet/ExternalLocation.cs | 2 + sdk/dotnet/GetCurrentMetastore.cs | 165 +++ sdk/dotnet/GetDirectory.cs | 9 +- sdk/dotnet/GetServicePrincipal.cs | 4 +- sdk/dotnet/GetSqlWarehouse.cs | 96 +- sdk/dotnet/Grant.cs | 219 +++ .../GetCurrentMetastoreMetastoreInfo.cs | 125 ++ .../GetCurrentMetastoreMetastoreInfoArgs.cs | 125 ++ .../Inputs/GetMetastoreMetastoreInfo.cs | 2 +- .../Inputs/GetMetastoreMetastoreInfoArgs.cs | 2 +- sdk/dotnet/Inputs/GetSqlWarehouseChannel.cs | 3 + .../Inputs/GetSqlWarehouseChannelArgs.cs | 3 + sdk/dotnet/Inputs/GetSqlWarehouseHealth.cs | 35 + .../Inputs/GetSqlWarehouseHealthArgs.cs | 35 + .../GetSqlWarehouseHealthFailureReason.cs | 34 + .../GetSqlWarehouseHealthFailureReasonArgs.cs | 34 + .../Inputs/GetSqlWarehouseOdbcParams.cs | 15 +- .../Inputs/GetSqlWarehouseOdbcParamsArgs.cs | 15 +- sdk/dotnet/Inputs/GetSqlWarehouseTags.cs | 2 +- sdk/dotnet/Inputs/GetSqlWarehouseTagsArgs.cs | 2 +- .../Inputs/GetSqlWarehouseTagsCustomTag.cs | 8 +- .../GetSqlWarehouseTagsCustomTagArgs.cs | 8 +- sdk/dotnet/Inputs/SqlEndpointChannelArgs.cs | 3 + .../Inputs/SqlEndpointChannelGetArgs.cs | 3 + sdk/dotnet/Inputs/SqlEndpointHealthArgs.cs | 35 + .../SqlEndpointHealthFailureReasonArgs.cs | 34 + .../SqlEndpointHealthFailureReasonGetArgs.cs | 34 + 
sdk/dotnet/Inputs/SqlEndpointHealthGetArgs.cs | 35 + .../Inputs/SqlEndpointOdbcParamsArgs.cs | 15 +- .../Inputs/SqlEndpointOdbcParamsGetArgs.cs | 15 +- sdk/dotnet/Inputs/SqlEndpointTagsArgs.cs | 2 +- sdk/dotnet/Inputs/SqlEndpointTagsGetArgs.cs | 2 +- sdk/dotnet/Metastore.cs | 2 + sdk/dotnet/MetastoreAssignment.cs | 2 + sdk/dotnet/MetastoreDataAccess.cs | 11 + sdk/dotnet/MetastoreProvider.cs | 2 + .../GetCurrentMetastoreMetastoreInfoResult.cs | 144 ++ .../GetMetastoreMetastoreInfoResult.cs | 2 +- .../Outputs/GetSqlWarehouseChannelResult.cs | 7 +- ...etSqlWarehouseHealthFailureReasonResult.cs | 33 + .../Outputs/GetSqlWarehouseHealthResult.cs | 41 + .../GetSqlWarehouseOdbcParamsResult.cs | 16 +- .../GetSqlWarehouseTagsCustomTagResult.cs | 8 +- sdk/dotnet/Outputs/SqlEndpointChannel.cs | 7 +- sdk/dotnet/Outputs/SqlEndpointHealth.cs | 41 + .../Outputs/SqlEndpointHealthFailureReason.cs | 33 + sdk/dotnet/Outputs/SqlEndpointOdbcParams.cs | 16 +- sdk/dotnet/Recipient.cs | 2 + sdk/dotnet/RegisteredModel.cs | 2 + sdk/dotnet/Repo.cs | 12 + sdk/dotnet/Schema.cs | 2 + sdk/dotnet/SqlEndpoint.cs | 80 +- sdk/dotnet/StorageCredential.cs | 20 + sdk/dotnet/SystemSchema.cs | 3 +- sdk/dotnet/Volume.cs | 2 + sdk/go/databricks/accessControlRuleSet.go | 2 + sdk/go/databricks/connection.go | 2 + sdk/go/databricks/defaultNamespaceSetting.go | 2 + sdk/go/databricks/directory.go | 11 + sdk/go/databricks/externalLocation.go | 2 + sdk/go/databricks/getCurrentMetastore.go | 132 ++ sdk/go/databricks/getDirectory.go | 7 + sdk/go/databricks/getServicePrincipal.go | 4 +- sdk/go/databricks/getSqlWarehouse.go | 70 +- sdk/go/databricks/grant.go | 341 +++++ sdk/go/databricks/init.go | 7 + sdk/go/databricks/metastore.go | 2 + sdk/go/databricks/metastoreAssignment.go | 2 + sdk/go/databricks/metastoreDataAccess.go | 61 +- sdk/go/databricks/metastoreProvider.go | 2 + sdk/go/databricks/pulumiTypes.go | 1257 ++++++++++++++++- sdk/go/databricks/recipient.go | 2 + sdk/go/databricks/registeredModel.go | 2 + 
sdk/go/databricks/repo.go | 11 + sdk/go/databricks/schema.go | 2 + sdk/go/databricks/sqlEndpoint.go | 99 +- sdk/go/databricks/storageCredential.go | 17 + sdk/go/databricks/systemSchema.go | 4 +- sdk/go/databricks/volume.go | 2 + .../databricks/AccessControlRuleSet.java | 2 + .../com/pulumi/databricks/Connection.java | 2 + .../databricks/DatabricksFunctions.java | 303 ++++ .../databricks/DefaultNamespaceSetting.java | 2 + .../java/com/pulumi/databricks/Directory.java | 14 + .../pulumi/databricks/ExternalLocation.java | 2 + .../java/com/pulumi/databricks/Grant.java | 159 +++ .../java/com/pulumi/databricks/GrantArgs.java | 313 ++++ .../java/com/pulumi/databricks/Metastore.java | 2 + .../databricks/MetastoreAssignment.java | 2 + .../databricks/MetastoreDataAccess.java | 8 + .../databricks/MetastoreDataAccessArgs.java | 17 + .../pulumi/databricks/MetastoreProvider.java | 2 + .../java/com/pulumi/databricks/Recipient.java | 2 + .../pulumi/databricks/RegisteredModel.java | 2 + .../main/java/com/pulumi/databricks/Repo.java | 14 + .../java/com/pulumi/databricks/Schema.java | 2 + .../com/pulumi/databricks/SqlEndpoint.java | 70 +- .../pulumi/databricks/SqlEndpointArgs.java | 117 +- .../pulumi/databricks/StorageCredential.java | 16 + .../databricks/StorageCredentialArgs.java | 37 + .../com/pulumi/databricks/SystemSchema.java | 3 +- .../java/com/pulumi/databricks/Volume.java | 2 + .../databricks/inputs/DirectoryState.java | 37 + .../inputs/GetCurrentMetastoreArgs.java | 121 ++ .../GetCurrentMetastoreMetastoreInfo.java | 518 +++++++ .../GetCurrentMetastoreMetastoreInfoArgs.java | 693 +++++++++ .../inputs/GetCurrentMetastorePlainArgs.java | 100 ++ .../inputs/GetMetastoreMetastoreInfo.java | 6 +- .../inputs/GetMetastoreMetastoreInfoArgs.java | 8 +- .../inputs/GetServicePrincipalArgs.java | 8 +- .../inputs/GetServicePrincipalPlainArgs.java | 6 +- .../inputs/GetSqlWarehouseArgs.java | 189 +++ .../inputs/GetSqlWarehouseChannel.java | 13 + .../inputs/GetSqlWarehouseChannelArgs.java | 
17 + .../inputs/GetSqlWarehouseHealth.java | 111 ++ .../inputs/GetSqlWarehouseHealthArgs.java | 132 ++ .../GetSqlWarehouseHealthFailureReason.java | 86 ++ ...etSqlWarehouseHealthFailureReasonArgs.java | 99 ++ .../inputs/GetSqlWarehouseOdbcParams.java | 53 +- .../inputs/GetSqlWarehouseOdbcParamsArgs.java | 57 +- .../inputs/GetSqlWarehousePlainArgs.java | 137 ++ .../inputs/GetSqlWarehouseTags.java | 16 +- .../inputs/GetSqlWarehouseTagsArgs.java | 16 +- .../inputs/GetSqlWarehouseTagsCustomTag.java | 29 +- .../GetSqlWarehouseTagsCustomTagArgs.java | 29 +- .../pulumi/databricks/inputs/GrantState.java | 306 ++++ .../inputs/MetastoreDataAccessState.java | 17 + .../pulumi/databricks/inputs/RepoState.java | 37 + .../inputs/SqlEndpointChannelArgs.java | 17 + .../inputs/SqlEndpointHealthArgs.java | 132 ++ .../SqlEndpointHealthFailureReasonArgs.java | 99 ++ .../inputs/SqlEndpointOdbcParamsArgs.java | 57 +- .../databricks/inputs/SqlEndpointState.java | 171 ++- .../inputs/SqlEndpointTagsArgs.java | 16 +- .../inputs/StorageCredentialState.java | 37 + .../GetCurrentMetastoreMetastoreInfo.java | 407 ++++++ .../outputs/GetCurrentMetastoreResult.java | 82 ++ .../outputs/GetDirectoryResult.java | 23 + .../outputs/GetMetastoreMetastoreInfo.java | 4 +- .../outputs/GetSqlWarehouseChannel.java | 13 + .../outputs/GetSqlWarehouseHealth.java | 102 ++ .../GetSqlWarehouseHealthFailureReason.java | 77 + .../outputs/GetSqlWarehouseOdbcParams.java | 56 +- .../outputs/GetSqlWarehouseResult.java | 109 ++ .../outputs/GetSqlWarehouseTags.java | 14 +- .../outputs/GetSqlWarehouseTagsCustomTag.java | 31 +- .../outputs/SqlEndpointChannel.java | 13 + .../databricks/outputs/SqlEndpointHealth.java | 102 ++ .../SqlEndpointHealthFailureReason.java | 77 + .../outputs/SqlEndpointOdbcParams.java | 56 +- .../databricks/outputs/SqlEndpointTags.java | 14 +- sdk/nodejs/accessControlRuleSet.ts | 2 + sdk/nodejs/connection.ts | 2 + sdk/nodejs/defaultNamespaceSetting.ts | 2 + sdk/nodejs/directory.ts | 10 + 
sdk/nodejs/externalLocation.ts | 2 + sdk/nodejs/getCurrentMetastore.ts | 116 ++ sdk/nodejs/getDirectory.ts | 4 + sdk/nodejs/getServicePrincipal.ts | 4 +- sdk/nodejs/getSqlWarehouse.ts | 70 + sdk/nodejs/grant.ts | 148 ++ sdk/nodejs/index.ts | 13 + sdk/nodejs/metastore.ts | 2 + sdk/nodejs/metastoreAssignment.ts | 2 + sdk/nodejs/metastoreDataAccess.ts | 7 + sdk/nodejs/metastoreProvider.ts | 2 + sdk/nodejs/recipient.ts | 2 + sdk/nodejs/registeredModel.ts | 2 + sdk/nodejs/repo.ts | 10 + sdk/nodejs/schema.ts | 2 + sdk/nodejs/sqlEndpoint.ts | 74 +- sdk/nodejs/storageCredential.ts | 16 + sdk/nodejs/systemSchema.ts | 3 +- sdk/nodejs/tsconfig.json | 2 + sdk/nodejs/types/input.ts | 228 ++- sdk/nodejs/types/output.ts | 126 +- sdk/nodejs/volume.ts | 2 + sdk/python/pulumi_databricks/__init__.py | 10 + sdk/python/pulumi_databricks/_inputs.py | 686 +++++++-- .../access_control_rule_set.py | 4 + sdk/python/pulumi_databricks/connection.py | 4 + .../default_namespace_setting.py | 4 + sdk/python/pulumi_databricks/directory.py | 32 +- .../pulumi_databricks/external_location.py | 4 + .../get_current_metastore.py | 141 ++ sdk/python/pulumi_databricks/get_directory.py | 19 +- .../get_service_principal.py | 4 +- .../pulumi_databricks/get_sql_warehouse.py | 88 +- sdk/python/pulumi_databricks/grant.py | 608 ++++++++ sdk/python/pulumi_databricks/metastore.py | 4 + .../pulumi_databricks/metastore_assignment.py | 4 + .../metastore_data_access.py | 44 +- .../pulumi_databricks/metastore_provider.py | 4 + sdk/python/pulumi_databricks/outputs.py | 528 ++++++- sdk/python/pulumi_databricks/recipient.py | 4 + .../pulumi_databricks/registered_model.py | 4 + sdk/python/pulumi_databricks/repo.py | 32 +- sdk/python/pulumi_databricks/schema.py | 4 + sdk/python/pulumi_databricks/sql_endpoint.py | 190 ++- .../pulumi_databricks/storage_credential.py | 57 +- sdk/python/pulumi_databricks/system_schema.py | 6 +- sdk/python/pulumi_databricks/volume.py | 4 + 210 files changed, 12321 insertions(+), 1068 
deletions(-) create mode 100644 sdk/dotnet/GetCurrentMetastore.cs create mode 100644 sdk/dotnet/Grant.cs create mode 100644 sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfo.cs create mode 100644 sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfoArgs.cs create mode 100644 sdk/dotnet/Inputs/GetSqlWarehouseHealth.cs create mode 100644 sdk/dotnet/Inputs/GetSqlWarehouseHealthArgs.cs create mode 100644 sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReason.cs create mode 100644 sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReasonArgs.cs create mode 100644 sdk/dotnet/Inputs/SqlEndpointHealthArgs.cs create mode 100644 sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonArgs.cs create mode 100644 sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonGetArgs.cs create mode 100644 sdk/dotnet/Inputs/SqlEndpointHealthGetArgs.cs create mode 100644 sdk/dotnet/Outputs/GetCurrentMetastoreMetastoreInfoResult.cs create mode 100644 sdk/dotnet/Outputs/GetSqlWarehouseHealthFailureReasonResult.cs create mode 100644 sdk/dotnet/Outputs/GetSqlWarehouseHealthResult.cs create mode 100644 sdk/dotnet/Outputs/SqlEndpointHealth.cs create mode 100644 sdk/dotnet/Outputs/SqlEndpointHealthFailureReason.cs create mode 100644 sdk/go/databricks/getCurrentMetastore.go create mode 100644 sdk/go/databricks/grant.go create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/Grant.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/GrantArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfo.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfoArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastorePlainArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealth.java create mode 100644 
sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReason.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReasonArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantState.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthFailureReasonArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreMetastoreInfo.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreResult.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealth.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealthFailureReason.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealth.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealthFailureReason.java create mode 100644 sdk/nodejs/getCurrentMetastore.ts create mode 100644 sdk/nodejs/grant.ts create mode 100644 sdk/python/pulumi_databricks/get_current_metastore.py create mode 100644 sdk/python/pulumi_databricks/grant.py diff --git a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json index ef99e418..6dfe7d8d 100644 --- a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json @@ -227,6 +227,15 @@ "current": "databricks:index/globalInitScript:GlobalInitScript", "majorVersion": 1 }, + "databricks_grant": { + "current": "databricks:index/grant:Grant", + "majorVersion": 1, + "fields": { + "privileges": { + 
"maxItemsOne": false + } + } + }, "databricks_grants": { "current": "databricks:index/grants:Grants", "majorVersion": 1, @@ -1499,6 +1508,16 @@ "channel": { "maxItemsOne": true }, + "health": { + "maxItemsOne": false, + "elem": { + "fields": { + "failure_reason": { + "maxItemsOne": true + } + } + } + }, "odbc_params": { "maxItemsOne": true }, @@ -1890,6 +1909,15 @@ "current": "databricks:index/getCurrentConfig:getCurrentConfig", "majorVersion": 1 }, + "databricks_current_metastore": { + "current": "databricks:index/getCurrentMetastore:getCurrentMetastore", + "majorVersion": 1, + "fields": { + "metastore_info": { + "maxItemsOne": true + } + } + }, "databricks_current_user": { "current": "databricks:index/getCurrentUser:getCurrentUser", "majorVersion": 1 @@ -2809,6 +2837,16 @@ "channel": { "maxItemsOne": true }, + "health": { + "maxItemsOne": true, + "elem": { + "fields": { + "failure_reason": { + "maxItemsOne": true + } + } + } + }, "odbc_params": { "maxItemsOne": true }, @@ -2866,7 +2904,15 @@ } } }, - "auto-settings": {}, + "auto-settings": { + "resources": { + "databricks_sql_endpoint": { + "maxItemsOneOverrides": { + "odbc_params": true + } + } + } + }, "renames": { "resources": { "databricks:index/accessControlRuleSet:AccessControlRuleSet": "databricks_access_control_rule_set", @@ -2883,6 +2929,7 @@ "databricks:index/externalLocation:ExternalLocation": "databricks_external_location", "databricks:index/gitCredential:GitCredential": "databricks_git_credential", "databricks:index/globalInitScript:GlobalInitScript": "databricks_global_init_script", + "databricks:index/grant:Grant": "databricks_grant", "databricks:index/grants:Grants": "databricks_grants", "databricks:index/group:Group": "databricks_group", "databricks:index/groupInstanceProfile:GroupInstanceProfile": "databricks_group_instance_profile", @@ -2956,6 +3003,7 @@ "databricks:index/getClusterPolicy:getClusterPolicy": "databricks_cluster_policy", "databricks:index/getClusters:getClusters": 
"databricks_clusters", "databricks:index/getCurrentConfig:getCurrentConfig": "databricks_current_config", + "databricks:index/getCurrentMetastore:getCurrentMetastore": "databricks_current_metastore", "databricks:index/getCurrentUser:getCurrentUser": "databricks_current_user", "databricks:index/getDbfsFile:getDbfsFile": "databricks_dbfs_file", "databricks:index/getDbfsFilePaths:getDbfsFilePaths": "databricks_dbfs_file_paths", @@ -3748,6 +3796,12 @@ "customSubject": "custom_subject", "emptyResultState": "empty_result_state" }, + "databricks:index/SqlEndpointChannel:SqlEndpointChannel": { + "dbsqlVersion": "dbsql_version" + }, + "databricks:index/SqlEndpointHealth:SqlEndpointHealth": { + "failureReason": "failure_reason" + }, "databricks:index/SqlEndpointTags:SqlEndpointTags": { "customTags": "custom_tags" }, @@ -3896,7 +3950,8 @@ }, "databricks:index/directory:Directory": { "deleteRecursive": "delete_recursive", - "objectId": "object_id" + "objectId": "object_id", + "workspacePath": "workspace_path" }, "databricks:index/entitlements:Entitlements": { "allowClusterCreate": "allow_cluster_create", @@ -4058,6 +4113,25 @@ "cloudType": "cloud_type", "isAccount": "is_account" }, + "databricks:index/getCurrentMetastore:getCurrentMetastore": { + "metastoreInfo": "metastore_info" + }, + "databricks:index/getCurrentMetastoreMetastoreInfo:getCurrentMetastoreMetastoreInfo": { + "createdAt": "created_at", + "createdBy": "created_by", + "defaultDataAccessConfigId": "default_data_access_config_id", + "deltaSharingOrganizationName": "delta_sharing_organization_name", + "deltaSharingRecipientTokenLifetimeInSeconds": "delta_sharing_recipient_token_lifetime_in_seconds", + "deltaSharingScope": "delta_sharing_scope", + "globalMetastoreId": "global_metastore_id", + "metastoreId": "metastore_id", + "privilegeModelVersion": "privilege_model_version", + "storageRoot": "storage_root", + "storageRootCredentialId": "storage_root_credential_id", + "storageRootCredentialName": 
"storage_root_credential_name", + "updatedAt": "updated_at", + "updatedBy": "updated_by" + }, "databricks:index/getCurrentUser:getCurrentUser": { "aclPrincipalId": "acl_principal_id", "externalId": "external_id", @@ -4075,7 +4149,8 @@ "fileSize": "file_size" }, "databricks:index/getDirectory:getDirectory": { - "objectId": "object_id" + "objectId": "object_id", + "workspacePath": "workspace_path" }, "databricks:index/getGroup:getGroup": { "aclPrincipalId": "acl_principal_id", @@ -4720,6 +4795,7 @@ "databricks:index/getSqlWarehouse:getSqlWarehouse": { "autoStopMins": "auto_stop_mins", "clusterSize": "cluster_size", + "creatorName": "creator_name", "dataSourceId": "data_source_id", "enablePhoton": "enable_photon", "enableServerlessCompute": "enable_serverless_compute", @@ -4727,9 +4803,17 @@ "jdbcUrl": "jdbc_url", "maxNumClusters": "max_num_clusters", "minNumClusters": "min_num_clusters", + "numActiveSessions": "num_active_sessions", "numClusters": "num_clusters", "odbcParams": "odbc_params", - "spotInstancePolicy": "spot_instance_policy" + "spotInstancePolicy": "spot_instance_policy", + "warehouseType": "warehouse_type" + }, + "databricks:index/getSqlWarehouseChannel:getSqlWarehouseChannel": { + "dbsqlVersion": "dbsql_version" + }, + "databricks:index/getSqlWarehouseHealth:getSqlWarehouseHealth": { + "failureReason": "failure_reason" }, "databricks:index/getSqlWarehouseTags:getSqlWarehouseTags": { "customTags": "custom_tags" @@ -4764,6 +4848,11 @@ "databricks:index/globalInitScript:GlobalInitScript": { "contentBase64": "content_base64" }, + "databricks:index/grant:Grant": { + "externalLocation": "external_location", + "foreignConnection": "foreign_connection", + "storageCredential": "storage_credential" + }, "databricks:index/grants:Grants": { "externalLocation": "external_location", "foreignConnection": "foreign_connection", @@ -4881,7 +4970,8 @@ "gcpServiceAccountKey": "gcp_service_account_key", "isDefault": "is_default", "metastoreId": "metastore_id", - 
"readOnly": "read_only" + "readOnly": "read_only", + "skipValidation": "skip_validation" }, "databricks:index/metastoreProvider:MetastoreProvider": { "authenticationType": "authentication_type", @@ -5064,7 +5154,8 @@ "databricks:index/repo:Repo": { "commitHash": "commit_hash", "gitProvider": "git_provider", - "sparseCheckout": "sparse_checkout" + "sparseCheckout": "sparse_checkout", + "workspacePath": "workspace_path" }, "databricks:index/schema:Schema": { "catalogName": "catalog_name", @@ -5120,13 +5211,16 @@ "databricks:index/sqlEndpoint:SqlEndpoint": { "autoStopMins": "auto_stop_mins", "clusterSize": "cluster_size", + "creatorName": "creator_name", "dataSourceId": "data_source_id", "enablePhoton": "enable_photon", "enableServerlessCompute": "enable_serverless_compute", + "healths": "health", "instanceProfileArn": "instance_profile_arn", "jdbcUrl": "jdbc_url", "maxNumClusters": "max_num_clusters", "minNumClusters": "min_num_clusters", + "numActiveSessions": "num_active_sessions", "numClusters": "num_clusters", "odbcParams": "odbc_params", "spotInstancePolicy": "spot_instance_policy", @@ -5186,7 +5280,8 @@ "forceUpdate": "force_update", "gcpServiceAccountKey": "gcp_service_account_key", "metastoreId": "metastore_id", - "readOnly": "read_only" + "readOnly": "read_only", + "skipValidation": "skip_validation" }, "databricks:index/systemSchema:SystemSchema": { "metastoreId": "metastore_id" diff --git a/provider/cmd/pulumi-resource-databricks/schema.json b/provider/cmd/pulumi-resource-databricks/schema.json index 23b91517..66b26225 100644 --- a/provider/cmd/pulumi-resource-databricks/schema.json +++ b/provider/cmd/pulumi-resource-databricks/schema.json @@ -5073,6 +5073,9 @@ }, "databricks:index/SqlEndpointChannel:SqlEndpointChannel": { "properties": { + "dbsqlVersion": { + "type": "string" + }, "name": { "type": "string", "description": "Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. 
Default is `CHANNEL_NAME_CURRENT`.\n" @@ -5080,11 +5083,45 @@ }, "type": "object" }, - "databricks:index/SqlEndpointOdbcParams:SqlEndpointOdbcParams": { + "databricks:index/SqlEndpointHealth:SqlEndpointHealth": { + "properties": { + "details": { + "type": "string" + }, + "failureReason": { + "$ref": "#/types/databricks:index/SqlEndpointHealthFailureReason:SqlEndpointHealthFailureReason" + }, + "message": { + "type": "string" + }, + "status": { + "type": "string" + }, + "summary": { + "type": "string" + } + }, + "type": "object" + }, + "databricks:index/SqlEndpointHealthFailureReason:SqlEndpointHealthFailureReason": { "properties": { - "host": { + "code": { "type": "string" }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + } + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "databricks:index/SqlEndpointOdbcParams:SqlEndpointOdbcParams": { + "properties": { "hostname": { "type": "string" }, @@ -5098,12 +5135,7 @@ "type": "string" } }, - "type": "object", - "required": [ - "path", - "port", - "protocol" - ] + "type": "object" }, "databricks:index/SqlEndpointTags:SqlEndpointTags": { "properties": { @@ -5114,10 +5146,7 @@ } } }, - "type": "object", - "required": [ - "customTags" - ] + "type": "object" }, "databricks:index/SqlEndpointTagsCustomTag:SqlEndpointTagsCustomTag": { "properties": { @@ -6284,6 +6313,82 @@ }, "type": "object" }, + "databricks:index/getCurrentMetastoreMetastoreInfo:getCurrentMetastoreMetastoreInfo": { + "properties": { + "cloud": { + "type": "string" + }, + "createdAt": { + "type": "integer", + "description": "Timestamp (in milliseconds) when the current metastore was created.\n" + }, + "createdBy": { + "type": "string", + "description": "the ID of the identity that created the current metastore.\n" + }, + "defaultDataAccessConfigId": { + "type": "string", + "description": "the ID of the default data access configuration.\n" + }, + "deltaSharingOrganizationName": { + "type": 
"string", + "description": "The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing.\n" + }, + "deltaSharingRecipientTokenLifetimeInSeconds": { + "type": "integer", + "description": "the expiration duration in seconds on recipient data access tokens.\n" + }, + "deltaSharingScope": { + "type": "string", + "description": "Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL.\n" + }, + "globalMetastoreId": { + "type": "string", + "description": "Identifier in form of `\u003ccloud\u003e:\u003cregion\u003e:\u003cmetastore_id\u003e` for use in Databricks to Databricks Delta Sharing.\n" + }, + "metastoreId": { + "type": "string", + "description": "Metastore ID.\n" + }, + "name": { + "type": "string", + "description": "Name of metastore.\n" + }, + "owner": { + "type": "string", + "description": "Username/group name/sp application_id of the metastore owner.\n" + }, + "privilegeModelVersion": { + "type": "string", + "description": "the version of the privilege model used by the metastore.\n" + }, + "region": { + "type": "string", + "description": "(Mandatory for account-level) The region of the metastore.\n" + }, + "storageRoot": { + "type": "string", + "description": "Path on cloud storage account, where managed `databricks.Table` are stored.\n" + }, + "storageRootCredentialId": { + "type": "string", + "description": "ID of a storage credential used for the `storage_root`.\n" + }, + "storageRootCredentialName": { + "type": "string", + "description": "Name of a storage credential used for the `storage_root`.\n" + }, + "updatedAt": { + "type": "integer", + "description": "Timestamp (in milliseconds) when the current metastore was updated.\n" + }, + "updatedBy": { + "type": "string", + "description": "the ID of the identity that updated the current metastore.\n" + } + }, + "type": "object" + }, "databricks:index/getDbfsFilePathsPathList:getDbfsFilePathsPathList": { "properties": { 
"fileSize": { @@ -9294,7 +9399,7 @@ }, "storageRoot": { "type": "string", - "description": "Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource.\n" + "description": "Path on cloud storage account, where managed `databricks.Table` are stored.\n" }, "storageRootCredentialId": { "type": "string" @@ -9493,6 +9598,9 @@ }, "databricks:index/getSqlWarehouseChannel:getSqlWarehouseChannel": { "properties": { + "dbsqlVersion": { + "type": "string" + }, "name": { "type": "string", "description": "Name of the SQL warehouse to search (case-sensitive).\n" @@ -9500,11 +9608,45 @@ }, "type": "object" }, - "databricks:index/getSqlWarehouseOdbcParams:getSqlWarehouseOdbcParams": { + "databricks:index/getSqlWarehouseHealth:getSqlWarehouseHealth": { + "properties": { + "details": { + "type": "string" + }, + "failureReason": { + "$ref": "#/types/databricks:index/getSqlWarehouseHealthFailureReason:getSqlWarehouseHealthFailureReason" + }, + "message": { + "type": "string" + }, + "status": { + "type": "string" + }, + "summary": { + "type": "string" + } + }, + "type": "object" + }, + "databricks:index/getSqlWarehouseHealthFailureReason:getSqlWarehouseHealthFailureReason": { "properties": { - "host": { + "code": { "type": "string" }, + "parameters": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + } + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "databricks:index/getSqlWarehouseOdbcParams:getSqlWarehouseOdbcParams": { + "properties": { "hostname": { "type": "string" }, @@ -9518,12 +9660,7 @@ "type": "string" } }, - "type": "object", - "required": [ - "path", - "port", - "protocol" - ] + "type": "object" }, "databricks:index/getSqlWarehouseTags:getSqlWarehouseTags": { "properties": { @@ -9534,10 +9671,7 @@ } } }, - "type": "object", - "required": [ - "customTags" - ] + "type": "object" }, "databricks:index/getSqlWarehouseTagsCustomTag:getSqlWarehouseTagsCustomTag": { 
"properties": { @@ -9548,11 +9682,7 @@ "type": "string" } }, - "type": "object", - "required": [ - "key", - "value" - ] + "type": "object" } }, "provider": { @@ -9750,7 +9880,7 @@ }, "resources": { "databricks:index/accessControlRuleSet:AccessControlRuleSet": { - "description": "This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace.\n\n\u003e **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`.\n\n\u003e **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information.\n\n## Service principal rule set usage\n\nThrough a Databricks workspace:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.then(ds =\u003e ds.aclPrincipalId)],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.Apply(getGroupResult =\u003e getGroupResult.AclPrincipalId),\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(ds.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n```\n\nThrough AWS Databricks account:\n\n```typescript\nimport * as pulumi from 
\"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := 
\"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n 
.grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\nThrough Azure Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {\n applicationId: \"00000000-0000-0000-0000-000000000000\",\n displayName: \"SP_FOR_AUTOMATION\",\n});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\",\n application_id=\"00000000-0000-0000-0000-000000000000\",\n display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e 
\n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n ApplicationId = \"00000000-0000-0000-0000-000000000000\",\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tApplicationId: pulumi.String(\"00000000-0000-0000-0000-000000000000\"),\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport 
com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .applicationId(\"00000000-0000-0000-0000-000000000000\")\n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n applicationId: 00000000-0000-0000-0000-000000000000\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\nThrough GCP Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", 
{displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: 
pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: 
databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\n## Group rule set usage\n\nRefer to the appropriate provider configuration as shown in the examples for service principal rule set.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst john = databricks.getUser({\n userName: \"john.doe@example.com\",\n});\nconst dsGroupRuleSet = new databricks.AccessControlRuleSet(\"dsGroupRuleSet\", {grantRules: [{\n principals: [john.then(john =\u003e john.aclPrincipalId)],\n role: \"roles/group.manager\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\njohn = databricks.get_user(user_name=\"john.doe@example.com\")\nds_group_rule_set = databricks.AccessControlRuleSet(\"dsGroupRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[john.acl_principal_id],\n role=\"roles/group.manager\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var john = Databricks.GetUser.Invoke(new()\n {\n UserName = \"john.doe@example.com\",\n });\n\n var dsGroupRuleSet = new Databricks.AccessControlRuleSet(\"dsGroupRuleSet\", new()\n {\n GrantRules = new[]\n {\n new 
Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n john.Apply(getUserResult =\u003e getUserResult.AclPrincipalId),\n },\n Role = \"roles/group.manager\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\t_, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjohn, err := databricks.LookupUser(ctx, \u0026databricks.LookupUserArgs{\n\t\t\tUserName: pulumi.StringRef(\"john.doe@example.com\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"dsGroupRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(john.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/group.manager\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.inputs.GetUserArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n 
public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n final var john = DatabricksFunctions.getUser(GetUserArgs.builder()\n .userName(\"john.doe@example.com\")\n .build());\n\n var dsGroupRuleSet = new AccessControlRuleSet(\"dsGroupRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(john.applyValue(getUserResult -\u003e getUserResult.aclPrincipalId()))\n .role(\"roles/group.manager\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dsGroupRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${john.aclPrincipalId}\n role: roles/group.manager\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n john:\n fn::invoke:\n Function: databricks:getUser\n Arguments:\n userName: john.doe@example.com\n```\n\n## Account rule set usage\n\nRefer to the appropriate provider configuration as shown in the examples for service principal rule set.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst marketplaceAdmins = databricks.getGroup({\n displayName: \"Marketplace Admins\",\n});\nconst john = databricks.getUser({\n userName: \"john.doe@example.com\",\n});\nconst accountRuleSet = new databricks.AccessControlRuleSet(\"accountRuleSet\", {grantRules: [\n {\n principals: [john.then(john =\u003e john.aclPrincipalId)],\n role: \"roles/group.manager\",\n },\n {\n principals: [ds.then(ds =\u003e ds.aclPrincipalId)],\n 
role: \"roles/servicePrincipal.manager\",\n },\n {\n principals: [marketplaceAdmins.then(marketplaceAdmins =\u003e marketplaceAdmins.aclPrincipalId)],\n role: \"roles/marketplace.admin\",\n },\n]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\nmarketplace_admins = databricks.get_group(display_name=\"Marketplace Admins\")\njohn = databricks.get_user(user_name=\"john.doe@example.com\")\naccount_rule_set = databricks.AccessControlRuleSet(\"accountRuleSet\", grant_rules=[\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[john.acl_principal_id],\n role=\"roles/group.manager\",\n ),\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.manager\",\n ),\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[marketplace_admins.acl_principal_id],\n role=\"roles/marketplace.admin\",\n ),\n])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var marketplaceAdmins = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Marketplace Admins\",\n });\n\n var john = Databricks.GetUser.Invoke(new()\n {\n UserName = \"john.doe@example.com\",\n });\n\n var accountRuleSet = new Databricks.AccessControlRuleSet(\"accountRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n john.Apply(getUserResult =\u003e getUserResult.AclPrincipalId),\n },\n Role = \"roles/group.manager\",\n },\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.Apply(getGroupResult =\u003e 
getGroupResult.AclPrincipalId),\n },\n Role = \"roles/servicePrincipal.manager\",\n },\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n marketplaceAdmins.Apply(getGroupResult =\u003e getGroupResult.AclPrincipalId),\n },\n Role = \"roles/marketplace.admin\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmarketplaceAdmins, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Marketplace Admins\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjohn, err := databricks.LookupUser(ctx, \u0026databricks.LookupUserArgs{\n\t\t\tUserName: pulumi.StringRef(\"john.doe@example.com\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"accountRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(john.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/group.manager\"),\n\t\t\t\t},\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(ds.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.manager\"),\n\t\t\t\t},\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: 
pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(marketplaceAdmins.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/marketplace.admin\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.inputs.GetUserArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n final var marketplaceAdmins = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Marketplace Admins\")\n .build());\n\n final var john = DatabricksFunctions.getUser(GetUserArgs.builder()\n .userName(\"john.doe@example.com\")\n .build());\n\n var accountRuleSet = new AccessControlRuleSet(\"accountRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules( \n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(john.applyValue(getUserResult -\u003e getUserResult.aclPrincipalId()))\n .role(\"roles/group.manager\")\n .build(),\n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/servicePrincipal.manager\")\n .build(),\n AccessControlRuleSetGrantRuleArgs.builder()\n 
.principals(marketplaceAdmins.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/marketplace.admin\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n accountRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n # user john is manager for all groups in the account\n grantRules:\n - principals:\n - ${john.aclPrincipalId}\n role: roles/group.manager\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.manager\n - principals:\n - ${marketplaceAdmins.aclPrincipalId}\n role: roles/marketplace.admin\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n marketplaceAdmins:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Marketplace Admins\n john:\n fn::invoke:\n Function: databricks:getUser\n Arguments:\n userName: john.doe@example.com\n```\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.Group\n* databricks.User\n* databricks.ServicePrincipal\n", + "description": "\u003e **Note** This resource could be used with account or workspace-level provider.\n\nThis resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace.\n\n\u003e **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`.\n\n\u003e **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. 
Refer to its documentation for more information.\n\n## Service principal rule set usage\n\nThrough a Databricks workspace:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.then(ds =\u003e ds.aclPrincipalId)],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.Apply(getGroupResult =\u003e getGroupResult.AclPrincipalId),\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n 
});\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(ds.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) 
{\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n```\n\nThrough AWS Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", 
display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: 
pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\nThrough Azure Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from 
\"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {\n applicationId: \"00000000-0000-0000-0000-000000000000\",\n displayName: \"SP_FOR_AUTOMATION\",\n});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\",\n application_id=\"00000000-0000-0000-0000-000000000000\",\n display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n ApplicationId = \"00000000-0000-0000-0000-000000000000\",\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tApplicationId: pulumi.String(\"00000000-0000-0000-0000-000000000000\"),\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = 
new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .applicationId(\"00000000-0000-0000-0000-000000000000\")\n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n applicationId: 00000000-0000-0000-0000-000000000000\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\nThrough GCP Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n 
principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport 
com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\n## Group rule set usage\n\nRefer to the appropriate provider configuration as shown in the examples for service principal rule set.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data 
Science\",\n});\nconst john = databricks.getUser({\n userName: \"john.doe@example.com\",\n});\nconst dsGroupRuleSet = new databricks.AccessControlRuleSet(\"dsGroupRuleSet\", {grantRules: [{\n principals: [john.then(john =\u003e john.aclPrincipalId)],\n role: \"roles/group.manager\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\njohn = databricks.get_user(user_name=\"john.doe@example.com\")\nds_group_rule_set = databricks.AccessControlRuleSet(\"dsGroupRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[john.acl_principal_id],\n role=\"roles/group.manager\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var john = Databricks.GetUser.Invoke(new()\n {\n UserName = \"john.doe@example.com\",\n });\n\n var dsGroupRuleSet = new Databricks.AccessControlRuleSet(\"dsGroupRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n john.Apply(getUserResult =\u003e getUserResult.AclPrincipalId),\n },\n Role = \"roles/group.manager\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\t_, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjohn, err := databricks.LookupUser(ctx, 
\u0026databricks.LookupUserArgs{\n\t\t\tUserName: pulumi.StringRef(\"john.doe@example.com\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"dsGroupRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(john.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/group.manager\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.inputs.GetUserArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n final var john = DatabricksFunctions.getUser(GetUserArgs.builder()\n .userName(\"john.doe@example.com\")\n .build());\n\n var dsGroupRuleSet = new AccessControlRuleSet(\"dsGroupRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(john.applyValue(getUserResult -\u003e getUserResult.aclPrincipalId()))\n 
.role(\"roles/group.manager\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dsGroupRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${john.aclPrincipalId}\n role: roles/group.manager\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n john:\n fn::invoke:\n Function: databricks:getUser\n Arguments:\n userName: john.doe@example.com\n```\n\n## Account rule set usage\n\nRefer to the appropriate provider configuration as shown in the examples for service principal rule set.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst marketplaceAdmins = databricks.getGroup({\n displayName: \"Marketplace Admins\",\n});\nconst john = databricks.getUser({\n userName: \"john.doe@example.com\",\n});\nconst accountRuleSet = new databricks.AccessControlRuleSet(\"accountRuleSet\", {grantRules: [\n {\n principals: [john.then(john =\u003e john.aclPrincipalId)],\n role: \"roles/group.manager\",\n },\n {\n principals: [ds.then(ds =\u003e ds.aclPrincipalId)],\n role: \"roles/servicePrincipal.manager\",\n },\n {\n principals: [marketplaceAdmins.then(marketplaceAdmins =\u003e marketplaceAdmins.aclPrincipalId)],\n role: \"roles/marketplace.admin\",\n },\n]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\nmarketplace_admins = databricks.get_group(display_name=\"Marketplace Admins\")\njohn = databricks.get_user(user_name=\"john.doe@example.com\")\naccount_rule_set = databricks.AccessControlRuleSet(\"accountRuleSet\", grant_rules=[\n databricks.AccessControlRuleSetGrantRuleArgs(\n 
principals=[john.acl_principal_id],\n role=\"roles/group.manager\",\n ),\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.manager\",\n ),\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[marketplace_admins.acl_principal_id],\n role=\"roles/marketplace.admin\",\n ),\n])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var marketplaceAdmins = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Marketplace Admins\",\n });\n\n var john = Databricks.GetUser.Invoke(new()\n {\n UserName = \"john.doe@example.com\",\n });\n\n var accountRuleSet = new Databricks.AccessControlRuleSet(\"accountRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n john.Apply(getUserResult =\u003e getUserResult.AclPrincipalId),\n },\n Role = \"roles/group.manager\",\n },\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.Apply(getGroupResult =\u003e getGroupResult.AclPrincipalId),\n },\n Role = \"roles/servicePrincipal.manager\",\n },\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n marketplaceAdmins.Apply(getGroupResult =\u003e getGroupResult.AclPrincipalId),\n },\n Role = \"roles/marketplace.admin\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.LookupGroup(ctx, 
\u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmarketplaceAdmins, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Marketplace Admins\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjohn, err := databricks.LookupUser(ctx, \u0026databricks.LookupUserArgs{\n\t\t\tUserName: pulumi.StringRef(\"john.doe@example.com\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"accountRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(john.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/group.manager\"),\n\t\t\t\t},\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(ds.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.manager\"),\n\t\t\t\t},\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(marketplaceAdmins.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/marketplace.admin\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.inputs.GetUserArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport 
com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n final var marketplaceAdmins = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Marketplace Admins\")\n .build());\n\n final var john = DatabricksFunctions.getUser(GetUserArgs.builder()\n .userName(\"john.doe@example.com\")\n .build());\n\n var accountRuleSet = new AccessControlRuleSet(\"accountRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules( \n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(john.applyValue(getUserResult -\u003e getUserResult.aclPrincipalId()))\n .role(\"roles/group.manager\")\n .build(),\n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/servicePrincipal.manager\")\n .build(),\n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(marketplaceAdmins.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/marketplace.admin\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n accountRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n # user john is manager for all groups in the account\n grantRules:\n - principals:\n - ${john.aclPrincipalId}\n role: roles/group.manager\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.manager\n - principals:\n - ${marketplaceAdmins.aclPrincipalId}\n role: roles/marketplace.admin\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n 
Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n marketplaceAdmins:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Marketplace Admins\n john:\n fn::invoke:\n Function: databricks:getUser\n Arguments:\n userName: john.doe@example.com\n```\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.Group\n* databricks.User\n* databricks.ServicePrincipal\n", "properties": { "etag": { "type": "string" @@ -10762,7 +10892,7 @@ } }, "databricks:index/connection:Connection": { - "description": "Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following:\n\n- A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system.\n- A foreign catalog\n\nThis resource manages connections in Unity Catalog\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nCreate a connection to a MySQL database\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst mysql = new databricks.Connection(\"mysql\", {\n comment: \"this is a connection to mysql db\",\n connectionType: \"MYSQL\",\n options: {\n host: \"test.mysql.database.azure.com\",\n password: \"password\",\n port: \"3306\",\n user: \"user\",\n },\n properties: {\n purpose: \"testing\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nmysql = databricks.Connection(\"mysql\",\n comment=\"this is a connection to mysql db\",\n connection_type=\"MYSQL\",\n options={\n \"host\": \"test.mysql.database.azure.com\",\n \"password\": \"password\",\n \"port\": \"3306\",\n \"user\": \"user\",\n },\n properties={\n \"purpose\": \"testing\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing 
System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var mysql = new Databricks.Connection(\"mysql\", new()\n {\n Comment = \"this is a connection to mysql db\",\n ConnectionType = \"MYSQL\",\n Options = \n {\n { \"host\", \"test.mysql.database.azure.com\" },\n { \"password\", \"password\" },\n { \"port\", \"3306\" },\n { \"user\", \"user\" },\n },\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewConnection(ctx, \"mysql\", \u0026databricks.ConnectionArgs{\n\t\t\tComment: pulumi.String(\"this is a connection to mysql db\"),\n\t\t\tConnectionType: pulumi.String(\"MYSQL\"),\n\t\t\tOptions: pulumi.Map{\n\t\t\t\t\"host\": pulumi.Any(\"test.mysql.database.azure.com\"),\n\t\t\t\t\"password\": pulumi.Any(\"password\"),\n\t\t\t\t\"port\": pulumi.Any(\"3306\"),\n\t\t\t\t\"user\": pulumi.Any(\"user\"),\n\t\t\t},\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Connection;\nimport com.pulumi.databricks.ConnectionArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var mysql = new Connection(\"mysql\", ConnectionArgs.builder() \n .comment(\"this is a connection to mysql db\")\n .connectionType(\"MYSQL\")\n .options(Map.ofEntries(\n 
Map.entry(\"host\", \"test.mysql.database.azure.com\"),\n Map.entry(\"password\", \"password\"),\n Map.entry(\"port\", \"3306\"),\n Map.entry(\"user\", \"user\")\n ))\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n mysql:\n type: databricks:Connection\n properties:\n comment: this is a connection to mysql db\n connectionType: MYSQL\n options:\n host: test.mysql.database.azure.com\n password: password\n port: '3306'\n user: user\n properties:\n purpose: testing\n```\n\nCreate a connection to a BigQuery database\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst bigquery = new databricks.Connection(\"bigquery\", {\n connectionType: \"BIGQUERY\",\n comment: \"this is a connection to BQ\",\n options: {\n GoogleServiceAccountKeyJson: JSON.stringify({\n type: \"service_account\",\n project_id: \"PROJECT_ID\",\n private_key_id: \"KEY_ID\",\n private_key: `-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n`,\n client_email: \"SERVICE_ACCOUNT_EMAIL\",\n client_id: \"CLIENT_ID\",\n auth_uri: \"https://accounts.google.com/o/oauth2/auth\",\n token_uri: \"https://accounts.google.com/o/oauth2/token\",\n auth_provider_x509_cert_url: \"https://www.googleapis.com/oauth2/v1/certs\",\n client_x509_cert_url: \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n universe_domain: \"googleapis.com\",\n }),\n },\n properties: {\n purpose: \"testing\",\n },\n});\n```\n```python\nimport pulumi\nimport json\nimport pulumi_databricks as databricks\n\nbigquery = databricks.Connection(\"bigquery\",\n connection_type=\"BIGQUERY\",\n comment=\"this is a connection to BQ\",\n options={\n \"GoogleServiceAccountKeyJson\": json.dumps({\n \"type\": \"service_account\",\n \"project_id\": \"PROJECT_ID\",\n \"private_key_id\": \"KEY_ID\",\n \"private_key\": \"\"\"-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n\"\"\",\n 
\"client_email\": \"SERVICE_ACCOUNT_EMAIL\",\n \"client_id\": \"CLIENT_ID\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n \"universe_domain\": \"googleapis.com\",\n }),\n },\n properties={\n \"purpose\": \"testing\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text.Json;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var bigquery = new Databricks.Connection(\"bigquery\", new()\n {\n ConnectionType = \"BIGQUERY\",\n Comment = \"this is a connection to BQ\",\n Options = \n {\n { \"GoogleServiceAccountKeyJson\", JsonSerializer.Serialize(new Dictionary\u003cstring, object?\u003e\n {\n [\"type\"] = \"service_account\",\n [\"project_id\"] = \"PROJECT_ID\",\n [\"private_key_id\"] = \"KEY_ID\",\n [\"private_key\"] = @\"-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n\",\n [\"client_email\"] = \"SERVICE_ACCOUNT_EMAIL\",\n [\"client_id\"] = \"CLIENT_ID\",\n [\"auth_uri\"] = \"https://accounts.google.com/o/oauth2/auth\",\n [\"token_uri\"] = \"https://accounts.google.com/o/oauth2/token\",\n [\"auth_provider_x509_cert_url\"] = \"https://www.googleapis.com/oauth2/v1/certs\",\n [\"client_x509_cert_url\"] = \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n [\"universe_domain\"] = \"googleapis.com\",\n }) },\n },\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"encoding/json\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\ttmpJSON0, err := 
json.Marshal(map[string]interface{}{\n\t\t\t\"type\": \"service_account\",\n\t\t\t\"project_id\": \"PROJECT_ID\",\n\t\t\t\"private_key_id\": \"KEY_ID\",\n\t\t\t\"private_key\": \"-----BEGIN PRIVATE KEY-----\\nPRIVATE_KEY\\n-----END PRIVATE KEY-----\\n\",\n\t\t\t\"client_email\": \"SERVICE_ACCOUNT_EMAIL\",\n\t\t\t\"client_id\": \"CLIENT_ID\",\n\t\t\t\"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n\t\t\t\"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n\t\t\t\"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n\t\t\t\"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n\t\t\t\"universe_domain\": \"googleapis.com\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson0 := string(tmpJSON0)\n\t\t_, err = databricks.NewConnection(ctx, \"bigquery\", \u0026databricks.ConnectionArgs{\n\t\t\tConnectionType: pulumi.String(\"BIGQUERY\"),\n\t\t\tComment: pulumi.String(\"this is a connection to BQ\"),\n\t\t\tOptions: pulumi.Map{\n\t\t\t\t\"GoogleServiceAccountKeyJson\": pulumi.String(json0),\n\t\t\t},\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Connection;\nimport com.pulumi.databricks.ConnectionArgs;\nimport static com.pulumi.codegen.internal.Serialization.*;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var bigquery = new Connection(\"bigquery\", ConnectionArgs.builder() \n .connectionType(\"BIGQUERY\")\n .comment(\"this is a connection 
to BQ\")\n .options(Map.of(\"GoogleServiceAccountKeyJson\", serializeJson(\n jsonObject(\n jsonProperty(\"type\", \"service_account\"),\n jsonProperty(\"project_id\", \"PROJECT_ID\"),\n jsonProperty(\"private_key_id\", \"KEY_ID\"),\n jsonProperty(\"private_key\", \"\"\"\n-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n \"\"\"),\n jsonProperty(\"client_email\", \"SERVICE_ACCOUNT_EMAIL\"),\n jsonProperty(\"client_id\", \"CLIENT_ID\"),\n jsonProperty(\"auth_uri\", \"https://accounts.google.com/o/oauth2/auth\"),\n jsonProperty(\"token_uri\", \"https://accounts.google.com/o/oauth2/token\"),\n jsonProperty(\"auth_provider_x509_cert_url\", \"https://www.googleapis.com/oauth2/v1/certs\"),\n jsonProperty(\"client_x509_cert_url\", \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\"),\n jsonProperty(\"universe_domain\", \"googleapis.com\")\n ))))\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n bigquery:\n type: databricks:Connection\n properties:\n connectionType: BIGQUERY\n comment: this is a connection to BQ\n options:\n GoogleServiceAccountKeyJson:\n fn::toJSON:\n type: service_account\n project_id: PROJECT_ID\n private_key_id: KEY_ID\n private_key: |\n -----BEGIN PRIVATE KEY-----\n PRIVATE_KEY\n -----END PRIVATE KEY-----\n client_email: SERVICE_ACCOUNT_EMAIL\n client_id: CLIENT_ID\n auth_uri: https://accounts.google.com/o/oauth2/auth\n token_uri: https://accounts.google.com/o/oauth2/token\n auth_provider_x509_cert_url: https://www.googleapis.com/oauth2/v1/certs\n client_x509_cert_url: https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\n universe_domain: googleapis.com\n properties:\n purpose: testing\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by `id`bash\n\n```sh\n $ pulumi import databricks:index/connection:Connection this '\u003cmetastore_id\u003e|\u003cname\u003e'\n```\n\n ", + "description": "\u003e **Note** 
This resource could be only used with workspace-level provider!\n\nLakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following:\n\n- A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system.\n- A foreign catalog\n\nThis resource manages connections in Unity Catalog\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nCreate a connection to a MySQL database\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst mysql = new databricks.Connection(\"mysql\", {\n comment: \"this is a connection to mysql db\",\n connectionType: \"MYSQL\",\n options: {\n host: \"test.mysql.database.azure.com\",\n password: \"password\",\n port: \"3306\",\n user: \"user\",\n },\n properties: {\n purpose: \"testing\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nmysql = databricks.Connection(\"mysql\",\n comment=\"this is a connection to mysql db\",\n connection_type=\"MYSQL\",\n options={\n \"host\": \"test.mysql.database.azure.com\",\n \"password\": \"password\",\n \"port\": \"3306\",\n \"user\": \"user\",\n },\n properties={\n \"purpose\": \"testing\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var mysql = new Databricks.Connection(\"mysql\", new()\n {\n Comment = \"this is a connection to mysql db\",\n ConnectionType = \"MYSQL\",\n Options = \n {\n { \"host\", \"test.mysql.database.azure.com\" },\n { \"password\", \"password\" },\n { \"port\", \"3306\" },\n { \"user\", \"user\" },\n },\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewConnection(ctx, \"mysql\", \u0026databricks.ConnectionArgs{\n\t\t\tComment: pulumi.String(\"this is a connection to mysql db\"),\n\t\t\tConnectionType: pulumi.String(\"MYSQL\"),\n\t\t\tOptions: pulumi.Map{\n\t\t\t\t\"host\": pulumi.Any(\"test.mysql.database.azure.com\"),\n\t\t\t\t\"password\": pulumi.Any(\"password\"),\n\t\t\t\t\"port\": pulumi.Any(\"3306\"),\n\t\t\t\t\"user\": pulumi.Any(\"user\"),\n\t\t\t},\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Connection;\nimport com.pulumi.databricks.ConnectionArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var mysql = new Connection(\"mysql\", ConnectionArgs.builder() \n .comment(\"this is a connection to mysql db\")\n .connectionType(\"MYSQL\")\n .options(Map.ofEntries(\n Map.entry(\"host\", \"test.mysql.database.azure.com\"),\n Map.entry(\"password\", \"password\"),\n Map.entry(\"port\", \"3306\"),\n Map.entry(\"user\", \"user\")\n ))\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n mysql:\n type: databricks:Connection\n properties:\n comment: this is a connection to mysql db\n connectionType: MYSQL\n options:\n host: test.mysql.database.azure.com\n password: password\n port: '3306'\n user: user\n properties:\n purpose: 
testing\n```\n\nCreate a connection to a BigQuery database\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst bigquery = new databricks.Connection(\"bigquery\", {\n connectionType: \"BIGQUERY\",\n comment: \"this is a connection to BQ\",\n options: {\n GoogleServiceAccountKeyJson: JSON.stringify({\n type: \"service_account\",\n project_id: \"PROJECT_ID\",\n private_key_id: \"KEY_ID\",\n private_key: `-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n`,\n client_email: \"SERVICE_ACCOUNT_EMAIL\",\n client_id: \"CLIENT_ID\",\n auth_uri: \"https://accounts.google.com/o/oauth2/auth\",\n token_uri: \"https://accounts.google.com/o/oauth2/token\",\n auth_provider_x509_cert_url: \"https://www.googleapis.com/oauth2/v1/certs\",\n client_x509_cert_url: \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n universe_domain: \"googleapis.com\",\n }),\n },\n properties: {\n purpose: \"testing\",\n },\n});\n```\n```python\nimport pulumi\nimport json\nimport pulumi_databricks as databricks\n\nbigquery = databricks.Connection(\"bigquery\",\n connection_type=\"BIGQUERY\",\n comment=\"this is a connection to BQ\",\n options={\n \"GoogleServiceAccountKeyJson\": json.dumps({\n \"type\": \"service_account\",\n \"project_id\": \"PROJECT_ID\",\n \"private_key_id\": \"KEY_ID\",\n \"private_key\": \"\"\"-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n\"\"\",\n \"client_email\": \"SERVICE_ACCOUNT_EMAIL\",\n \"client_id\": \"CLIENT_ID\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n \"universe_domain\": \"googleapis.com\",\n }),\n },\n properties={\n \"purpose\": \"testing\",\n 
})\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text.Json;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var bigquery = new Databricks.Connection(\"bigquery\", new()\n {\n ConnectionType = \"BIGQUERY\",\n Comment = \"this is a connection to BQ\",\n Options = \n {\n { \"GoogleServiceAccountKeyJson\", JsonSerializer.Serialize(new Dictionary\u003cstring, object?\u003e\n {\n [\"type\"] = \"service_account\",\n [\"project_id\"] = \"PROJECT_ID\",\n [\"private_key_id\"] = \"KEY_ID\",\n [\"private_key\"] = @\"-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n\",\n [\"client_email\"] = \"SERVICE_ACCOUNT_EMAIL\",\n [\"client_id\"] = \"CLIENT_ID\",\n [\"auth_uri\"] = \"https://accounts.google.com/o/oauth2/auth\",\n [\"token_uri\"] = \"https://accounts.google.com/o/oauth2/token\",\n [\"auth_provider_x509_cert_url\"] = \"https://www.googleapis.com/oauth2/v1/certs\",\n [\"client_x509_cert_url\"] = \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n [\"universe_domain\"] = \"googleapis.com\",\n }) },\n },\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"encoding/json\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\ttmpJSON0, err := json.Marshal(map[string]interface{}{\n\t\t\t\"type\": \"service_account\",\n\t\t\t\"project_id\": \"PROJECT_ID\",\n\t\t\t\"private_key_id\": \"KEY_ID\",\n\t\t\t\"private_key\": \"-----BEGIN PRIVATE KEY-----\\nPRIVATE_KEY\\n-----END PRIVATE KEY-----\\n\",\n\t\t\t\"client_email\": \"SERVICE_ACCOUNT_EMAIL\",\n\t\t\t\"client_id\": \"CLIENT_ID\",\n\t\t\t\"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n\t\t\t\"token_uri\": 
\"https://accounts.google.com/o/oauth2/token\",\n\t\t\t\"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n\t\t\t\"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\",\n\t\t\t\"universe_domain\": \"googleapis.com\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson0 := string(tmpJSON0)\n\t\t_, err = databricks.NewConnection(ctx, \"bigquery\", \u0026databricks.ConnectionArgs{\n\t\t\tConnectionType: pulumi.String(\"BIGQUERY\"),\n\t\t\tComment: pulumi.String(\"this is a connection to BQ\"),\n\t\t\tOptions: pulumi.Map{\n\t\t\t\t\"GoogleServiceAccountKeyJson\": pulumi.String(json0),\n\t\t\t},\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Connection;\nimport com.pulumi.databricks.ConnectionArgs;\nimport static com.pulumi.codegen.internal.Serialization.*;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var bigquery = new Connection(\"bigquery\", ConnectionArgs.builder() \n .connectionType(\"BIGQUERY\")\n .comment(\"this is a connection to BQ\")\n .options(Map.of(\"GoogleServiceAccountKeyJson\", serializeJson(\n jsonObject(\n jsonProperty(\"type\", \"service_account\"),\n jsonProperty(\"project_id\", \"PROJECT_ID\"),\n jsonProperty(\"private_key_id\", \"KEY_ID\"),\n jsonProperty(\"private_key\", \"\"\"\n-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY\n-----END PRIVATE KEY-----\n \"\"\"),\n jsonProperty(\"client_email\", \"SERVICE_ACCOUNT_EMAIL\"),\n 
jsonProperty(\"client_id\", \"CLIENT_ID\"),\n jsonProperty(\"auth_uri\", \"https://accounts.google.com/o/oauth2/auth\"),\n jsonProperty(\"token_uri\", \"https://accounts.google.com/o/oauth2/token\"),\n jsonProperty(\"auth_provider_x509_cert_url\", \"https://www.googleapis.com/oauth2/v1/certs\"),\n jsonProperty(\"client_x509_cert_url\", \"https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\"),\n jsonProperty(\"universe_domain\", \"googleapis.com\")\n ))))\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n bigquery:\n type: databricks:Connection\n properties:\n connectionType: BIGQUERY\n comment: this is a connection to BQ\n options:\n GoogleServiceAccountKeyJson:\n fn::toJSON:\n type: service_account\n project_id: PROJECT_ID\n private_key_id: KEY_ID\n private_key: |\n -----BEGIN PRIVATE KEY-----\n PRIVATE_KEY\n -----END PRIVATE KEY-----\n client_email: SERVICE_ACCOUNT_EMAIL\n client_id: CLIENT_ID\n auth_uri: https://accounts.google.com/o/oauth2/auth\n token_uri: https://accounts.google.com/o/oauth2/token\n auth_provider_x509_cert_url: https://www.googleapis.com/oauth2/v1/certs\n client_x509_cert_url: https://www.googleapis.com/robot/v1/metadata/x509/SERVICE_ACCOUNT_EMAIL\n universe_domain: googleapis.com\n properties:\n purpose: testing\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by `id`bash\n\n```sh\n $ pulumi import databricks:index/connection:Connection this '\u003cmetastore_id\u003e|\u003cname\u003e'\n```\n\n ", "properties": { "comment": { "type": "string", @@ -10994,7 +11124,7 @@ } }, "databricks:index/defaultNamespaceSetting:DefaultNamespaceSetting": { - "description": "The `databricks.DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace.\nSetting the default catalog for the workspace determines the catalog that is used when queries do not reference\na fully qualified 
3 level name. For example, if the default catalog is set to 'retail_prod' then a query\n'SELECT * FROM myTable' would reference the object 'retail_prod.default.myTable'\n(the schema 'default' is always assumed).\nThis setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute.\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.DefaultNamespaceSetting(\"this\", {namespace: {\n value: \"namespace_value\",\n}});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.DefaultNamespaceSetting(\"this\", namespace=databricks.DefaultNamespaceSettingNamespaceArgs(\n value=\"namespace_value\",\n))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.DefaultNamespaceSetting(\"this\", new()\n {\n Namespace = new Databricks.Inputs.DefaultNamespaceSettingNamespaceArgs\n {\n Value = \"namespace_value\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewDefaultNamespaceSetting(ctx, \"this\", \u0026databricks.DefaultNamespaceSettingArgs{\n\t\t\tNamespace: \u0026databricks.DefaultNamespaceSettingNamespaceArgs{\n\t\t\t\tValue: pulumi.String(\"namespace_value\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DefaultNamespaceSetting;\nimport 
com.pulumi.databricks.DefaultNamespaceSettingArgs;\nimport com.pulumi.databricks.inputs.DefaultNamespaceSettingNamespaceArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new DefaultNamespaceSetting(\"this\", DefaultNamespaceSettingArgs.builder() \n .namespace(DefaultNamespaceSettingNamespaceArgs.builder()\n .value(\"namespace_value\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:DefaultNamespaceSetting\n properties:\n namespace:\n value: namespace_value\n```\n{{% /example %}}\n{{% /examples %}}", + "description": "\u003e **Note** This resource could be only used with workspace-level provider!\n\nThe `databricks.DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace.\nSetting the default catalog for the workspace determines the catalog that is used when queries do not reference\na fully qualified 3 level name. For example, if the default catalog is set to 'retail_prod' then a query\n'SELECT * FROM myTable' would reference the object 'retail_prod.default.myTable'\n(the schema 'default' is always assumed).\nThis setting requires a restart of clusters and SQL warehouses to take effect. 
Additionally, the default namespace only applies when using Unity Catalog-enabled compute.\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.DefaultNamespaceSetting(\"this\", {namespace: {\n value: \"namespace_value\",\n}});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.DefaultNamespaceSetting(\"this\", namespace=databricks.DefaultNamespaceSettingNamespaceArgs(\n value=\"namespace_value\",\n))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.DefaultNamespaceSetting(\"this\", new()\n {\n Namespace = new Databricks.Inputs.DefaultNamespaceSettingNamespaceArgs\n {\n Value = \"namespace_value\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewDefaultNamespaceSetting(ctx, \"this\", \u0026databricks.DefaultNamespaceSettingArgs{\n\t\t\tNamespace: \u0026databricks.DefaultNamespaceSettingNamespaceArgs{\n\t\t\t\tValue: pulumi.String(\"namespace_value\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DefaultNamespaceSetting;\nimport com.pulumi.databricks.DefaultNamespaceSettingArgs;\nimport com.pulumi.databricks.inputs.DefaultNamespaceSettingNamespaceArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic 
class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new DefaultNamespaceSetting(\"this\", DefaultNamespaceSettingArgs.builder() \n .namespace(DefaultNamespaceSettingNamespaceArgs.builder()\n .value(\"namespace_value\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:DefaultNamespaceSetting\n properties:\n namespace:\n value: namespace_value\n```\n{{% /example %}}\n{{% /examples %}}", "properties": { "etag": { "type": "string" @@ -11057,11 +11187,16 @@ "path": { "type": "string", "description": "The absolute path of the directory, beginning with \"/\", e.g. \"/Demo\".\n" + }, + "workspacePath": { + "type": "string", + "description": "path on Workspace File System (WSFS) in form of `/Workspace` + `path`\n" } }, "required": [ "objectId", - "path" + "path", + "workspacePath" ], "inputProperties": { "deleteRecursive": { @@ -11094,6 +11229,10 @@ "type": "string", "description": "The absolute path of the directory, beginning with \"/\", e.g. \"/Demo\".\n", "willReplaceOnChanges": true + }, + "workspacePath": { + "type": "string", + "description": "path on Workspace File System (WSFS) in form of `/Workspace` + `path`\n" } }, "type": "object" @@ -11203,7 +11342,7 @@ } }, "databricks:index/externalLocation:ExternalLocation": { - "description": "To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:\n\n- databricks.StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). 
Storage credentials are access-controlled to determine which users can use the credential.\n- `databricks.ExternalLocation` are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.\n\n\n## Import\n\nThis resource can be imported by `name`bash\n\n```sh\n $ pulumi import databricks:index/externalLocation:ExternalLocation this \u003cname\u003e\n```\n\n ", + "description": "\u003e **Note** This resource could be only used with workspace-level provider!\n\nTo work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:\n\n- databricks.StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential.\n- `databricks.ExternalLocation` are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.\n\n\n## Import\n\nThis resource can be imported by `name`bash\n\n```sh\n $ pulumi import databricks:index/externalLocation:ExternalLocation this \u003cname\u003e\n```\n\n ", "properties": { "accessPoint": { "type": "string", @@ -11519,6 +11658,198 @@ "type": "object" } }, + "databricks:index/grant:Grant": { + "properties": { + "catalog": { + "type": "string" + }, + "externalLocation": { + "type": "string" + }, + "foreignConnection": { + "type": "string" + }, + "function": { + "type": "string" + }, + "metastore": { + "type": "string" + }, + "model": { + "type": "string" + }, + "pipeline": { + "type": "string" + }, + "principal": { + "type": "string" + }, + "privileges": { + "type": "array", + "items": { + "type": "string" + } + }, + "recipient": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "share": { + "type": "string" + }, + "storageCredential": { + "type": "string" + }, + "table": { + "type": "string" + }, + "volume": { + "type": 
"string" + } + }, + "required": [ + "principal", + "privileges" + ], + "inputProperties": { + "catalog": { + "type": "string", + "willReplaceOnChanges": true + }, + "externalLocation": { + "type": "string", + "willReplaceOnChanges": true + }, + "foreignConnection": { + "type": "string", + "willReplaceOnChanges": true + }, + "function": { + "type": "string", + "willReplaceOnChanges": true + }, + "metastore": { + "type": "string", + "willReplaceOnChanges": true + }, + "model": { + "type": "string", + "willReplaceOnChanges": true + }, + "pipeline": { + "type": "string", + "willReplaceOnChanges": true + }, + "principal": { + "type": "string", + "willReplaceOnChanges": true + }, + "privileges": { + "type": "array", + "items": { + "type": "string" + } + }, + "recipient": { + "type": "string", + "willReplaceOnChanges": true + }, + "schema": { + "type": "string", + "willReplaceOnChanges": true + }, + "share": { + "type": "string", + "willReplaceOnChanges": true + }, + "storageCredential": { + "type": "string", + "willReplaceOnChanges": true + }, + "table": { + "type": "string", + "willReplaceOnChanges": true + }, + "volume": { + "type": "string", + "willReplaceOnChanges": true + } + }, + "requiredInputs": [ + "principal", + "privileges" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering Grant resources.\n", + "properties": { + "catalog": { + "type": "string", + "willReplaceOnChanges": true + }, + "externalLocation": { + "type": "string", + "willReplaceOnChanges": true + }, + "foreignConnection": { + "type": "string", + "willReplaceOnChanges": true + }, + "function": { + "type": "string", + "willReplaceOnChanges": true + }, + "metastore": { + "type": "string", + "willReplaceOnChanges": true + }, + "model": { + "type": "string", + "willReplaceOnChanges": true + }, + "pipeline": { + "type": "string", + "willReplaceOnChanges": true + }, + "principal": { + "type": "string", + "willReplaceOnChanges": true + }, + "privileges": { + "type": 
"array", + "items": { + "type": "string" + } + }, + "recipient": { + "type": "string", + "willReplaceOnChanges": true + }, + "schema": { + "type": "string", + "willReplaceOnChanges": true + }, + "share": { + "type": "string", + "willReplaceOnChanges": true + }, + "storageCredential": { + "type": "string", + "willReplaceOnChanges": true + }, + "table": { + "type": "string", + "willReplaceOnChanges": true + }, + "volume": { + "type": "string", + "willReplaceOnChanges": true + } + }, + "type": "object" + } + }, "databricks:index/grants:Grants": { "properties": { "catalog": { @@ -12965,7 +13296,7 @@ } }, "databricks:index/metastore:Metastore": { - "description": "A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore.\n\nUnity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore).\n\nA Unity Catalog metastore can be created without a root location \u0026 credential to maintain strict separation of storage across catalogs or environments.\n\n\n## Import\n\nThis resource can be imported by IDbash\n\n```sh\n $ pulumi import databricks:index/metastore:Metastore this \u003cid\u003e\n```\n\n ", + "description": "\u003e **Note** This resource could be used with account or workspace-level provider.\n\nA metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore.\n\nUnity Catalog offers a new metastore with built in security and auditing. 
This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore).\n\nA Unity Catalog metastore can be created without a root location \u0026 credential to maintain strict separation of storage across catalogs or environments.\n\n\n## Import\n\nThis resource can be imported by IDbash\n\n```sh\n $ pulumi import databricks:index/metastore:Metastore this \u003cid\u003e\n```\n\n ", "properties": { "cloud": { "type": "string" @@ -13169,7 +13500,7 @@ } }, "databricks:index/metastoreAssignment:MetastoreAssignment": { - "description": "A single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst thisMetastore = new databricks.Metastore(\"thisMetastore\", {\n storageRoot: `s3://${aws_s3_bucket.metastore.id}/metastore`,\n owner: \"uc admins\",\n region: \"us-east-1\",\n forceDestroy: true,\n});\nconst thisMetastoreAssignment = new databricks.MetastoreAssignment(\"thisMetastoreAssignment\", {\n metastoreId: thisMetastore.id,\n workspaceId: local.workspace_id,\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis_metastore = databricks.Metastore(\"thisMetastore\",\n storage_root=f\"s3://{aws_s3_bucket['metastore']['id']}/metastore\",\n owner=\"uc admins\",\n region=\"us-east-1\",\n force_destroy=True)\nthis_metastore_assignment = databricks.MetastoreAssignment(\"thisMetastoreAssignment\",\n metastore_id=this_metastore.id,\n workspace_id=local[\"workspace_id\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n 
var thisMetastore = new Databricks.Metastore(\"thisMetastore\", new()\n {\n StorageRoot = $\"s3://{aws_s3_bucket.Metastore.Id}/metastore\",\n Owner = \"uc admins\",\n Region = \"us-east-1\",\n ForceDestroy = true,\n });\n\n var thisMetastoreAssignment = new Databricks.MetastoreAssignment(\"thisMetastoreAssignment\", new()\n {\n MetastoreId = thisMetastore.Id,\n WorkspaceId = local.Workspace_id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthisMetastore, err := databricks.NewMetastore(ctx, \"thisMetastore\", \u0026databricks.MetastoreArgs{\n\t\t\tStorageRoot: pulumi.String(fmt.Sprintf(\"s3://%v/metastore\", aws_s3_bucket.Metastore.Id)),\n\t\t\tOwner: pulumi.String(\"uc admins\"),\n\t\t\tRegion: pulumi.String(\"us-east-1\"),\n\t\t\tForceDestroy: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewMetastoreAssignment(ctx, \"thisMetastoreAssignment\", \u0026databricks.MetastoreAssignmentArgs{\n\t\t\tMetastoreId: thisMetastore.ID(),\n\t\t\tWorkspaceId: pulumi.Any(local.Workspace_id),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Metastore;\nimport com.pulumi.databricks.MetastoreArgs;\nimport com.pulumi.databricks.MetastoreAssignment;\nimport com.pulumi.databricks.MetastoreAssignmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var thisMetastore = new Metastore(\"thisMetastore\", 
MetastoreArgs.builder() \n .storageRoot(String.format(\"s3://%s/metastore\", aws_s3_bucket.metastore().id()))\n .owner(\"uc admins\")\n .region(\"us-east-1\")\n .forceDestroy(true)\n .build());\n\n var thisMetastoreAssignment = new MetastoreAssignment(\"thisMetastoreAssignment\", MetastoreAssignmentArgs.builder() \n .metastoreId(thisMetastore.id())\n .workspaceId(local.workspace_id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n thisMetastore:\n type: databricks:Metastore\n properties:\n storageRoot: s3://${aws_s3_bucket.metastore.id}/metastore\n owner: uc admins\n region: us-east-1\n forceDestroy: true\n thisMetastoreAssignment:\n type: databricks:MetastoreAssignment\n properties:\n metastoreId: ${thisMetastore.id}\n workspaceId: ${local.workspace_id}\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by combination of workspace id and metastore idbash\n\n```sh\n $ pulumi import databricks:index/metastoreAssignment:MetastoreAssignment this '\u003cworkspace_id\u003e|\u003cmetastore_id\u003e'\n```\n\n ", + "description": "\u003e **Note** This resource could be only used with account-level provider!\n\nA single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. 
You can only create a single metastore for each region in which your organization operates.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst thisMetastore = new databricks.Metastore(\"thisMetastore\", {\n storageRoot: `s3://${aws_s3_bucket.metastore.id}/metastore`,\n owner: \"uc admins\",\n region: \"us-east-1\",\n forceDestroy: true,\n});\nconst thisMetastoreAssignment = new databricks.MetastoreAssignment(\"thisMetastoreAssignment\", {\n metastoreId: thisMetastore.id,\n workspaceId: local.workspace_id,\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis_metastore = databricks.Metastore(\"thisMetastore\",\n storage_root=f\"s3://{aws_s3_bucket['metastore']['id']}/metastore\",\n owner=\"uc admins\",\n region=\"us-east-1\",\n force_destroy=True)\nthis_metastore_assignment = databricks.MetastoreAssignment(\"thisMetastoreAssignment\",\n metastore_id=this_metastore.id,\n workspace_id=local[\"workspace_id\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var thisMetastore = new Databricks.Metastore(\"thisMetastore\", new()\n {\n StorageRoot = $\"s3://{aws_s3_bucket.Metastore.Id}/metastore\",\n Owner = \"uc admins\",\n Region = \"us-east-1\",\n ForceDestroy = true,\n });\n\n var thisMetastoreAssignment = new Databricks.MetastoreAssignment(\"thisMetastoreAssignment\", new()\n {\n MetastoreId = thisMetastore.Id,\n WorkspaceId = local.Workspace_id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthisMetastore, err := databricks.NewMetastore(ctx, \"thisMetastore\", 
\u0026databricks.MetastoreArgs{\n\t\t\tStorageRoot: pulumi.String(fmt.Sprintf(\"s3://%v/metastore\", aws_s3_bucket.Metastore.Id)),\n\t\t\tOwner: pulumi.String(\"uc admins\"),\n\t\t\tRegion: pulumi.String(\"us-east-1\"),\n\t\t\tForceDestroy: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewMetastoreAssignment(ctx, \"thisMetastoreAssignment\", \u0026databricks.MetastoreAssignmentArgs{\n\t\t\tMetastoreId: thisMetastore.ID(),\n\t\t\tWorkspaceId: pulumi.Any(local.Workspace_id),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Metastore;\nimport com.pulumi.databricks.MetastoreArgs;\nimport com.pulumi.databricks.MetastoreAssignment;\nimport com.pulumi.databricks.MetastoreAssignmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var thisMetastore = new Metastore(\"thisMetastore\", MetastoreArgs.builder() \n .storageRoot(String.format(\"s3://%s/metastore\", aws_s3_bucket.metastore().id()))\n .owner(\"uc admins\")\n .region(\"us-east-1\")\n .forceDestroy(true)\n .build());\n\n var thisMetastoreAssignment = new MetastoreAssignment(\"thisMetastoreAssignment\", MetastoreAssignmentArgs.builder() \n .metastoreId(thisMetastore.id())\n .workspaceId(local.workspace_id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n thisMetastore:\n type: databricks:Metastore\n properties:\n storageRoot: s3://${aws_s3_bucket.metastore.id}/metastore\n owner: uc admins\n region: us-east-1\n forceDestroy: true\n thisMetastoreAssignment:\n type: databricks:MetastoreAssignment\n properties:\n metastoreId: 
${thisMetastore.id}\n workspaceId: ${local.workspace_id}\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by combination of workspace id and metastore idbash\n\n```sh\n $ pulumi import databricks:index/metastoreAssignment:MetastoreAssignment this '\u003cworkspace_id\u003e|\u003cmetastore_id\u003e'\n```\n\n ", "properties": { "defaultCatalogName": { "type": "string", @@ -13230,7 +13561,7 @@ } }, "databricks:index/metastoreDataAccess:MetastoreDataAccess": { - "description": "Optionally, each databricks.Metastore can have a default databricks.StorageCredential defined as `databricks.MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined.\n\n\n## Import\n\nThis resource can be imported by combination of metastore id and the data access name. bash\n\n```sh\n $ pulumi import databricks:index/metastoreDataAccess:MetastoreDataAccess this '\u003cmetastore_id\u003e|\u003cname\u003e'\n```\n\n ", + "description": "\u003e **Note** This resource could be used with account or workspace-level provider.\n\nOptionally, each databricks.Metastore can have a default databricks.StorageCredential defined as `databricks.MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined.\n\n\n## Import\n\nThis resource can be imported by combination of metastore id and the data access name. 
bash\n\n```sh\n $ pulumi import databricks:index/metastoreDataAccess:MetastoreDataAccess this '\u003cmetastore_id\u003e|\u003cname\u003e'\n```\n\n ", "properties": { "awsIamRole": { "$ref": "#/types/databricks:index/MetastoreDataAccessAwsIamRole:MetastoreDataAccessAwsIamRole" @@ -13271,6 +13602,9 @@ }, "readOnly": { "type": "boolean" + }, + "skipValidation": { + "type": "boolean" } }, "required": [ @@ -13329,6 +13663,10 @@ "readOnly": { "type": "boolean", "willReplaceOnChanges": true + }, + "skipValidation": { + "type": "boolean", + "willReplaceOnChanges": true } }, "stateInputs": { @@ -13383,13 +13721,17 @@ "readOnly": { "type": "boolean", "willReplaceOnChanges": true + }, + "skipValidation": { + "type": "boolean", + "willReplaceOnChanges": true } }, "type": "object" } }, "databricks:index/metastoreProvider:MetastoreProvider": { - "description": "Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you.\n\nA `databricks.MetastoreProvider` is contained within databricks.Metastore and can contain a list of shares that have been shared with you.\n\nNote that Databricks to Databricks sharing automatically creates the provider.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst dbprovider = new databricks.MetastoreProvider(\"dbprovider\", {\n comment: \"made by terraform 2\",\n authenticationType: \"TOKEN\",\n recipientProfileStr: JSON.stringify({\n shareCredentialsVersion: 1,\n bearerToken: \"token\",\n endpoint: \"endpoint\",\n expirationTime: \"expiration-time\",\n }),\n});\n```\n```python\nimport pulumi\nimport json\nimport pulumi_databricks as databricks\n\ndbprovider = databricks.MetastoreProvider(\"dbprovider\",\n comment=\"made by terraform 2\",\n authentication_type=\"TOKEN\",\n recipient_profile_str=json.dumps({\n \"shareCredentialsVersion\": 1,\n 
\"bearerToken\": \"token\",\n \"endpoint\": \"endpoint\",\n \"expirationTime\": \"expiration-time\",\n }))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text.Json;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var dbprovider = new Databricks.MetastoreProvider(\"dbprovider\", new()\n {\n Comment = \"made by terraform 2\",\n AuthenticationType = \"TOKEN\",\n RecipientProfileStr = JsonSerializer.Serialize(new Dictionary\u003cstring, object?\u003e\n {\n [\"shareCredentialsVersion\"] = 1,\n [\"bearerToken\"] = \"token\",\n [\"endpoint\"] = \"endpoint\",\n [\"expirationTime\"] = \"expiration-time\",\n }),\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"encoding/json\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\ttmpJSON0, err := json.Marshal(map[string]interface{}{\n\t\t\t\"shareCredentialsVersion\": 1,\n\t\t\t\"bearerToken\": \"token\",\n\t\t\t\"endpoint\": \"endpoint\",\n\t\t\t\"expirationTime\": \"expiration-time\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson0 := string(tmpJSON0)\n\t\t_, err = databricks.NewMetastoreProvider(ctx, \"dbprovider\", \u0026databricks.MetastoreProviderArgs{\n\t\t\tComment: pulumi.String(\"made by terraform 2\"),\n\t\t\tAuthenticationType: pulumi.String(\"TOKEN\"),\n\t\t\tRecipientProfileStr: pulumi.String(json0),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MetastoreProvider;\nimport com.pulumi.databricks.MetastoreProviderArgs;\nimport static com.pulumi.codegen.internal.Serialization.*;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var dbprovider = new MetastoreProvider(\"dbprovider\", MetastoreProviderArgs.builder() \n .comment(\"made by terraform 2\")\n .authenticationType(\"TOKEN\")\n .recipientProfileStr(serializeJson(\n jsonObject(\n jsonProperty(\"shareCredentialsVersion\", 1),\n jsonProperty(\"bearerToken\", \"token\"),\n jsonProperty(\"endpoint\", \"endpoint\"),\n jsonProperty(\"expirationTime\", \"expiration-time\")\n )))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dbprovider:\n type: databricks:MetastoreProvider\n properties:\n comment: made by terraform 2\n authenticationType: TOKEN\n recipientProfileStr:\n fn::toJSON:\n shareCredentialsVersion: 1\n bearerToken: token\n endpoint: endpoint\n expirationTime: expiration-time\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.getTables data to list tables within Unity Catalog.\n* databricks.getSchemas data to list schemas within Unity Catalog.\n* databricks.getCatalogs data to list catalogs within Unity Catalog.\n", + "description": "\u003e **Note** This resource could be only used with workspace-level provider!\n\nWithin a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you.\n\nA `databricks.MetastoreProvider` is contained within databricks.Metastore and can contain a list of shares that have been shared with you.\n\nNote that Databricks to Databricks sharing automatically creates the provider.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst dbprovider = new databricks.MetastoreProvider(\"dbprovider\", {\n comment: \"made by 
terraform 2\",\n authenticationType: \"TOKEN\",\n recipientProfileStr: JSON.stringify({\n shareCredentialsVersion: 1,\n bearerToken: \"token\",\n endpoint: \"endpoint\",\n expirationTime: \"expiration-time\",\n }),\n});\n```\n```python\nimport pulumi\nimport json\nimport pulumi_databricks as databricks\n\ndbprovider = databricks.MetastoreProvider(\"dbprovider\",\n comment=\"made by terraform 2\",\n authentication_type=\"TOKEN\",\n recipient_profile_str=json.dumps({\n \"shareCredentialsVersion\": 1,\n \"bearerToken\": \"token\",\n \"endpoint\": \"endpoint\",\n \"expirationTime\": \"expiration-time\",\n }))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text.Json;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var dbprovider = new Databricks.MetastoreProvider(\"dbprovider\", new()\n {\n Comment = \"made by terraform 2\",\n AuthenticationType = \"TOKEN\",\n RecipientProfileStr = JsonSerializer.Serialize(new Dictionary\u003cstring, object?\u003e\n {\n [\"shareCredentialsVersion\"] = 1,\n [\"bearerToken\"] = \"token\",\n [\"endpoint\"] = \"endpoint\",\n [\"expirationTime\"] = \"expiration-time\",\n }),\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"encoding/json\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\ttmpJSON0, err := json.Marshal(map[string]interface{}{\n\t\t\t\"shareCredentialsVersion\": 1,\n\t\t\t\"bearerToken\": \"token\",\n\t\t\t\"endpoint\": \"endpoint\",\n\t\t\t\"expirationTime\": \"expiration-time\",\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjson0 := string(tmpJSON0)\n\t\t_, err = databricks.NewMetastoreProvider(ctx, \"dbprovider\", \u0026databricks.MetastoreProviderArgs{\n\t\t\tComment: pulumi.String(\"made by terraform 2\"),\n\t\t\tAuthenticationType: 
pulumi.String(\"TOKEN\"),\n\t\t\tRecipientProfileStr: pulumi.String(json0),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MetastoreProvider;\nimport com.pulumi.databricks.MetastoreProviderArgs;\nimport static com.pulumi.codegen.internal.Serialization.*;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var dbprovider = new MetastoreProvider(\"dbprovider\", MetastoreProviderArgs.builder() \n .comment(\"made by terraform 2\")\n .authenticationType(\"TOKEN\")\n .recipientProfileStr(serializeJson(\n jsonObject(\n jsonProperty(\"shareCredentialsVersion\", 1),\n jsonProperty(\"bearerToken\", \"token\"),\n jsonProperty(\"endpoint\", \"endpoint\"),\n jsonProperty(\"expirationTime\", \"expiration-time\")\n )))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dbprovider:\n type: databricks:MetastoreProvider\n properties:\n comment: made by terraform 2\n authenticationType: TOKEN\n recipientProfileStr:\n fn::toJSON:\n shareCredentialsVersion: 1\n bearerToken: token\n endpoint: endpoint\n expirationTime: expiration-time\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.getTables data to list tables within Unity Catalog.\n* databricks.getSchemas data to list schemas within Unity Catalog.\n* databricks.getCatalogs data to list catalogs within Unity Catalog.\n", "properties": { "authenticationType": { "type": "string", @@ -16174,7 +16516,7 @@ } }, "databricks:index/recipient:Recipient": { - "description": "Within a metastore, Unity Catalog provides the ability 
to create a recipient to attach delta shares to.\n\nA `databricks.Recipient` is contained within databricks.Metastore and can have permissions to `SELECT` from a list of shares.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Databricks Sharing with non databricks recipient\n\nSetting `authentication_type` type to `TOKEN` creates a temporary url to download a credentials file. This is used to\nauthenticate to the sharing server to access data. This is for when the recipient is not using Databricks.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\nimport * as random from \"@pulumi/random\";\n\nconst db2opensharecode = new random.RandomPassword(\"db2opensharecode\", {\n length: 16,\n special: true,\n});\nconst current = databricks.getCurrentUser({});\nconst db2open = new databricks.Recipient(\"db2open\", {\n comment: \"made by terraform\",\n authenticationType: \"TOKEN\",\n sharingCode: db2opensharecode.result,\n ipAccessList: {\n allowedIpAddresses: [],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\nimport pulumi_random as random\n\ndb2opensharecode = random.RandomPassword(\"db2opensharecode\",\n length=16,\n special=True)\ncurrent = databricks.get_current_user()\ndb2open = databricks.Recipient(\"db2open\",\n comment=\"made by terraform\",\n authentication_type=\"TOKEN\",\n sharing_code=db2opensharecode.result,\n ip_access_list=databricks.RecipientIpAccessListArgs(\n allowed_ip_addresses=[],\n ))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var db2opensharecode = new Random.RandomPassword(\"db2opensharecode\", new()\n {\n Length = 16,\n Special = true,\n });\n\n var current = Databricks.GetCurrentUser.Invoke();\n\n var db2open = new Databricks.Recipient(\"db2open\", new()\n {\n Comment = 
\"made by terraform\",\n AuthenticationType = \"TOKEN\",\n SharingCode = db2opensharecode.Result,\n IpAccessList = new Databricks.Inputs.RecipientIpAccessListArgs\n {\n AllowedIpAddresses = new() { },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdb2opensharecode, err := random.NewRandomPassword(ctx, \"db2opensharecode\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.GetCurrentUser(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewRecipient(ctx, \"db2open\", \u0026databricks.RecipientArgs{\n\t\t\tComment: pulumi.String(\"made by terraform\"),\n\t\t\tAuthenticationType: pulumi.String(\"TOKEN\"),\n\t\t\tSharingCode: db2opensharecode.Result,\n\t\t\tIpAccessList: \u0026databricks.RecipientIpAccessListArgs{\n\t\t\t\tAllowedIpAddresses: pulumi.StringArray{},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.Recipient;\nimport com.pulumi.databricks.RecipientArgs;\nimport com.pulumi.databricks.inputs.RecipientIpAccessListArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var 
db2opensharecode = new RandomPassword(\"db2opensharecode\", RandomPasswordArgs.builder() \n .length(16)\n .special(true)\n .build());\n\n final var current = DatabricksFunctions.getCurrentUser();\n\n var db2open = new Recipient(\"db2open\", RecipientArgs.builder() \n .comment(\"made by terraform\")\n .authenticationType(\"TOKEN\")\n .sharingCode(db2opensharecode.result())\n .ipAccessList(RecipientIpAccessListArgs.builder()\n .allowedIpAddresses()\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n db2opensharecode:\n type: random:RandomPassword\n properties:\n length: 16\n special: true\n db2open:\n type: databricks:Recipient\n properties:\n comment: made by terraform\n authenticationType: TOKEN\n sharingCode: ${db2opensharecode.result}\n ipAccessList:\n allowedIpAddresses: []\nvariables:\n current:\n fn::invoke:\n Function: databricks:getCurrentUser\n Arguments: {}\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.Share to create Delta Sharing shares.\n* databricks.Grants to manage Delta Sharing permissions.\n* databricks.getShares to read existing Delta Sharing shares.\n", + "description": "\u003e **Note** This resource could be only used with workspace-level provider!\n\nWithin a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to.\n\nA `databricks.Recipient` is contained within databricks.Metastore and can have permissions to `SELECT` from a list of shares.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Databricks Sharing with non databricks recipient\n\nSetting `authentication_type` type to `TOKEN` creates a temporary url to download a credentials file. This is used to\nauthenticate to the sharing server to access data. 
This is for when the recipient is not using Databricks.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\nimport * as random from \"@pulumi/random\";\n\nconst db2opensharecode = new random.RandomPassword(\"db2opensharecode\", {\n length: 16,\n special: true,\n});\nconst current = databricks.getCurrentUser({});\nconst db2open = new databricks.Recipient(\"db2open\", {\n comment: \"made by terraform\",\n authenticationType: \"TOKEN\",\n sharingCode: db2opensharecode.result,\n ipAccessList: {\n allowedIpAddresses: [],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\nimport pulumi_random as random\n\ndb2opensharecode = random.RandomPassword(\"db2opensharecode\",\n length=16,\n special=True)\ncurrent = databricks.get_current_user()\ndb2open = databricks.Recipient(\"db2open\",\n comment=\"made by terraform\",\n authentication_type=\"TOKEN\",\n sharing_code=db2opensharecode.result,\n ip_access_list=databricks.RecipientIpAccessListArgs(\n allowed_ip_addresses=[],\n ))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var db2opensharecode = new Random.RandomPassword(\"db2opensharecode\", new()\n {\n Length = 16,\n Special = true,\n });\n\n var current = Databricks.GetCurrentUser.Invoke();\n\n var db2open = new Databricks.Recipient(\"db2open\", new()\n {\n Comment = \"made by terraform\",\n AuthenticationType = \"TOKEN\",\n SharingCode = db2opensharecode.Result,\n IpAccessList = new Databricks.Inputs.RecipientIpAccessListArgs\n {\n AllowedIpAddresses = new() { },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx 
*pulumi.Context) error {\n\t\tdb2opensharecode, err := random.NewRandomPassword(ctx, \"db2opensharecode\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.GetCurrentUser(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewRecipient(ctx, \"db2open\", \u0026databricks.RecipientArgs{\n\t\t\tComment: pulumi.String(\"made by terraform\"),\n\t\t\tAuthenticationType: pulumi.String(\"TOKEN\"),\n\t\t\tSharingCode: db2opensharecode.Result,\n\t\t\tIpAccessList: \u0026databricks.RecipientIpAccessListArgs{\n\t\t\t\tAllowedIpAddresses: pulumi.StringArray{},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.Recipient;\nimport com.pulumi.databricks.RecipientArgs;\nimport com.pulumi.databricks.inputs.RecipientIpAccessListArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var db2opensharecode = new RandomPassword(\"db2opensharecode\", RandomPasswordArgs.builder() \n .length(16)\n .special(true)\n .build());\n\n final var current = DatabricksFunctions.getCurrentUser();\n\n var db2open = new Recipient(\"db2open\", RecipientArgs.builder() \n .comment(\"made by terraform\")\n .authenticationType(\"TOKEN\")\n .sharingCode(db2opensharecode.result())\n .ipAccessList(RecipientIpAccessListArgs.builder()\n .allowedIpAddresses()\n .build())\n 
.build());\n\n }\n}\n```\n```yaml\nresources:\n db2opensharecode:\n type: random:RandomPassword\n properties:\n length: 16\n special: true\n db2open:\n type: databricks:Recipient\n properties:\n comment: made by terraform\n authenticationType: TOKEN\n sharingCode: ${db2opensharecode.result}\n ipAccessList:\n allowedIpAddresses: []\nvariables:\n current:\n fn::invoke:\n Function: databricks:getCurrentUser\n Arguments: {}\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.Share to create Delta Sharing shares.\n* databricks.Grants to manage Delta Sharing permissions.\n* databricks.getShares to read existing Delta Sharing shares.\n", "properties": { "authenticationType": { "type": "string", @@ -16311,7 +16653,7 @@ } }, "databricks:index/registeredModel:RegisteredModel": { - "description": "This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.RegisteredModel(\"this\", {\n catalogName: \"main\",\n schemaName: \"default\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.RegisteredModel(\"this\",\n catalog_name=\"main\",\n schema_name=\"default\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.RegisteredModel(\"this\", new()\n {\n CatalogName = \"main\",\n SchemaName = \"default\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) 
error {\n\t\t_, err := databricks.NewRegisteredModel(ctx, \"this\", \u0026databricks.RegisteredModelArgs{\n\t\t\tCatalogName: pulumi.String(\"main\"),\n\t\t\tSchemaName: pulumi.String(\"default\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.RegisteredModel;\nimport com.pulumi.databricks.RegisteredModelArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new RegisteredModel(\"this\", RegisteredModelArgs.builder() \n .catalogName(\"main\")\n .schemaName(\"default\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:RegisteredModel\n properties:\n catalogName: main\n schemaName: default\n```\n{{% /example %}}\n{{% /examples %}}\n## Access Control\n\n* databricks.Grants can be used to grant principals `ALL_PRIVILEGES`, `APPLY_TAG`, and `EXECUTE` privileges.\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.ModelServing to serve this model on a Databricks serving endpoint.\n* databricks.MlflowExperiment to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks.\n* databricks.Table data to manage tables within Unity Catalog.\n* databricks.Schema data to manage schemas within Unity Catalog.\n* databricks.Catalog data to manage catalogs within Unity Catalog.\n\n\n## Import\n\nThe registered model resource can be imported using the full (3-level) name of the model. 
bash\n\n```sh\n $ pulumi import databricks:index/registeredModel:RegisteredModel this \u003ccatalog_name.schema_name.model_name\u003e\n```\n\n ", + "description": "\u003e **Note** This resource could be only used with workspace-level provider!\n\nThis resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.RegisteredModel(\"this\", {\n catalogName: \"main\",\n schemaName: \"default\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.RegisteredModel(\"this\",\n catalog_name=\"main\",\n schema_name=\"default\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.RegisteredModel(\"this\", new()\n {\n CatalogName = \"main\",\n SchemaName = \"default\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewRegisteredModel(ctx, \"this\", \u0026databricks.RegisteredModelArgs{\n\t\t\tCatalogName: pulumi.String(\"main\"),\n\t\t\tSchemaName: pulumi.String(\"default\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.RegisteredModel;\nimport com.pulumi.databricks.RegisteredModelArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport 
java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new RegisteredModel(\"this\", RegisteredModelArgs.builder() \n .catalogName(\"main\")\n .schemaName(\"default\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:RegisteredModel\n properties:\n catalogName: main\n schemaName: default\n```\n{{% /example %}}\n{{% /examples %}}\n## Access Control\n\n* databricks.Grants can be used to grant principals `ALL_PRIVILEGES`, `APPLY_TAG`, and `EXECUTE` privileges.\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.ModelServing to serve this model on a Databricks serving endpoint.\n* databricks.MlflowExperiment to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks.\n* databricks.Table data to manage tables within Unity Catalog.\n* databricks.Schema data to manage schemas within Unity Catalog.\n* databricks.Catalog data to manage catalogs within Unity Catalog.\n\n\n## Import\n\nThe registered model resource can be imported using the full (3-level) name of the model. bash\n\n```sh\n $ pulumi import databricks:index/registeredModel:RegisteredModel this \u003ccatalog_name.schema_name.model_name\u003e\n```\n\n ", "properties": { "catalogName": { "type": "string", @@ -16427,6 +16769,10 @@ "url": { "type": "string", "description": "The URL of the Git Repository to clone from. If the value changes, repo is re-created.\n" + }, + "workspacePath": { + "type": "string", + "description": "path on Workspace File System (WSFS) in form of `/Workspace` + `path`\n" } }, "required": [ @@ -16434,7 +16780,8 @@ "commitHash", "gitProvider", "path", - "url" + "url", + "workspacePath" ], "inputProperties": { "branch": { @@ -16505,13 +16852,17 @@ "type": "string", "description": "The URL of the Git Repository to clone from. 
If the value changes, repo is re-created.\n", "willReplaceOnChanges": true + }, + "workspacePath": { + "type": "string", + "description": "path on Workspace File System (WSFS) in form of `/Workspace` + `path`\n" } }, "type": "object" } }, "databricks:index/schema:Schema": { - "description": "Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views.\n\nA `databricks.Schema` is contained within databricks.Catalog and can contain tables \u0026 views.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst sandbox = new databricks.Catalog(\"sandbox\", {\n comment: \"this catalog is managed by terraform\",\n properties: {\n purpose: \"testing\",\n },\n});\nconst things = new databricks.Schema(\"things\", {\n catalogName: sandbox.id,\n comment: \"this database is managed by terraform\",\n properties: {\n kind: \"various\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nsandbox = databricks.Catalog(\"sandbox\",\n comment=\"this catalog is managed by terraform\",\n properties={\n \"purpose\": \"testing\",\n })\nthings = databricks.Schema(\"things\",\n catalog_name=sandbox.id,\n comment=\"this database is managed by terraform\",\n properties={\n \"kind\": \"various\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var sandbox = new Databricks.Catalog(\"sandbox\", new()\n {\n Comment = \"this catalog is managed by terraform\",\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n var things = new Databricks.Schema(\"things\", new()\n {\n CatalogName = sandbox.Id,\n Comment = \"this database is managed by terraform\",\n Properties = \n {\n { \"kind\", \"various\" },\n },\n 
});\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsandbox, err := databricks.NewCatalog(ctx, \"sandbox\", \u0026databricks.CatalogArgs{\n\t\t\tComment: pulumi.String(\"this catalog is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewSchema(ctx, \"things\", \u0026databricks.SchemaArgs{\n\t\t\tCatalogName: sandbox.ID(),\n\t\t\tComment: pulumi.String(\"this database is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"kind\": pulumi.Any(\"various\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Catalog;\nimport com.pulumi.databricks.CatalogArgs;\nimport com.pulumi.databricks.Schema;\nimport com.pulumi.databricks.SchemaArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var sandbox = new Catalog(\"sandbox\", CatalogArgs.builder() \n .comment(\"this catalog is managed by terraform\")\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n var things = new Schema(\"things\", SchemaArgs.builder() \n .catalogName(sandbox.id())\n .comment(\"this database is managed by terraform\")\n .properties(Map.of(\"kind\", \"various\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n sandbox:\n type: databricks:Catalog\n properties:\n comment: this catalog is managed by terraform\n 
properties:\n purpose: testing\n things:\n type: databricks:Schema\n properties:\n catalogName: ${sandbox.id}\n comment: this database is managed by terraform\n properties:\n kind: various\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.getTables data to list tables within Unity Catalog.\n* databricks.getSchemas data to list schemas within Unity Catalog.\n* databricks.getCatalogs data to list catalogs within Unity Catalog.\n\n\n## Import\n\nThis resource can be imported by its full namebash\n\n```sh\n $ pulumi import databricks:index/schema:Schema this \u003ccatalog_name\u003e.\u003cname\u003e\n```\n\n ", + "description": "\u003e **Note** This resource could be only used with workspace-level provider!\n\nWithin a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views.\n\nA `databricks.Schema` is contained within databricks.Catalog and can contain tables \u0026 views.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst sandbox = new databricks.Catalog(\"sandbox\", {\n comment: \"this catalog is managed by terraform\",\n properties: {\n purpose: \"testing\",\n },\n});\nconst things = new databricks.Schema(\"things\", {\n catalogName: sandbox.id,\n comment: \"this database is managed by terraform\",\n properties: {\n kind: \"various\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nsandbox = databricks.Catalog(\"sandbox\",\n comment=\"this catalog is managed by terraform\",\n properties={\n \"purpose\": \"testing\",\n })\nthings = databricks.Schema(\"things\",\n catalog_name=sandbox.id,\n comment=\"this database is managed by terraform\",\n properties={\n \"kind\": \"various\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing 
System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var sandbox = new Databricks.Catalog(\"sandbox\", new()\n {\n Comment = \"this catalog is managed by terraform\",\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n var things = new Databricks.Schema(\"things\", new()\n {\n CatalogName = sandbox.Id,\n Comment = \"this database is managed by terraform\",\n Properties = \n {\n { \"kind\", \"various\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsandbox, err := databricks.NewCatalog(ctx, \"sandbox\", \u0026databricks.CatalogArgs{\n\t\t\tComment: pulumi.String(\"this catalog is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewSchema(ctx, \"things\", \u0026databricks.SchemaArgs{\n\t\t\tCatalogName: sandbox.ID(),\n\t\t\tComment: pulumi.String(\"this database is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"kind\": pulumi.Any(\"various\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Catalog;\nimport com.pulumi.databricks.CatalogArgs;\nimport com.pulumi.databricks.Schema;\nimport com.pulumi.databricks.SchemaArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var sandbox = new 
Catalog(\"sandbox\", CatalogArgs.builder() \n .comment(\"this catalog is managed by terraform\")\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n var things = new Schema(\"things\", SchemaArgs.builder() \n .catalogName(sandbox.id())\n .comment(\"this database is managed by terraform\")\n .properties(Map.of(\"kind\", \"various\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n sandbox:\n type: databricks:Catalog\n properties:\n comment: this catalog is managed by terraform\n properties:\n purpose: testing\n things:\n type: databricks:Schema\n properties:\n catalogName: ${sandbox.id}\n comment: this database is managed by terraform\n properties:\n kind: various\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.getTables data to list tables within Unity Catalog.\n* databricks.getSchemas data to list schemas within Unity Catalog.\n* databricks.getCatalogs data to list catalogs within Unity Catalog.\n\n\n## Import\n\nThis resource can be imported by its full namebash\n\n```sh\n $ pulumi import databricks:index/schema:Schema this \u003ccatalog_name\u003e.\u003cname\u003e\n```\n\n ", "properties": { "catalogName": { "type": "string", @@ -17457,6 +17808,10 @@ "type": "string", "description": "The size of the clusters allocated to the endpoint: \"2X-Small\", \"X-Small\", \"Small\", \"Medium\", \"Large\", \"X-Large\", \"2X-Large\", \"3X-Large\", \"4X-Large\".\n" }, + "creatorName": { + "type": "string", + "description": "The username of the user who created the endpoint.\n" + }, "dataSourceId": { "type": "string", "description": "ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.\n" @@ -17469,6 +17824,13 @@ "type": "boolean", "description": "Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. 
To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.\n\n- **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup).\n\n- **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall).\n" }, + "healths": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/SqlEndpointHealth:SqlEndpointHealth" + }, + "description": "Health status of the endpoint.\n" + }, "instanceProfileArn": { "type": "string" }, @@ -17488,8 +17850,13 @@ "type": "string", "description": "Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. 
Default is `CHANNEL_NAME_CURRENT`.\n" }, + "numActiveSessions": { + "type": "integer", + "description": "The current number of active sessions for the endpoint.\n" + }, "numClusters": { - "type": "integer" + "type": "integer", + "description": "The current number of clusters used by the endpoint.\n" }, "odbcParams": { "$ref": "#/types/databricks:index/SqlEndpointOdbcParams:SqlEndpointOdbcParams", @@ -17500,7 +17867,8 @@ "description": "The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`.\n" }, "state": { - "type": "string" + "type": "string", + "description": "The current state of the endpoint.\n" }, "tags": { "$ref": "#/types/databricks:index/SqlEndpointTags:SqlEndpointTags", @@ -17508,14 +17876,18 @@ }, "warehouseType": { "type": "string", - "description": "SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`.\n" + "description": "SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`.\n" } }, "required": [ "clusterSize", + "creatorName", "dataSourceId", + "healths", "jdbcUrl", "name", + "numActiveSessions", + "numClusters", "odbcParams", "state" ], @@ -17547,10 +17919,6 @@ "instanceProfileArn": { "type": "string" }, - "jdbcUrl": { - "type": "string", - "description": "JDBC connection string.\n" - }, "maxNumClusters": { "type": "integer", "description": "Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`.\n" @@ -17563,27 +17931,17 @@ "type": "string", "description": "Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`.\n" }, - "numClusters": { - "type": "integer" - }, - "odbcParams": { - "$ref": "#/types/databricks:index/SqlEndpointOdbcParams:SqlEndpointOdbcParams", - "description": "ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`.\n" - }, "spotInstancePolicy": { "type": "string", "description": "The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`.\n" }, - "state": { - "type": "string" - }, "tags": { "$ref": "#/types/databricks:index/SqlEndpointTags:SqlEndpointTags", "description": "Databricks tags all endpoint resources with these tags.\n" }, "warehouseType": { "type": "string", - "description": "SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`.\n" + "description": "SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`.\n" } }, "requiredInputs": [ @@ -17604,6 +17962,10 @@ "type": "string", "description": "The size of the clusters allocated to the endpoint: \"2X-Small\", \"X-Small\", \"Small\", \"Medium\", \"Large\", \"X-Large\", \"2X-Large\", \"3X-Large\", \"4X-Large\".\n" }, + "creatorName": { + "type": "string", + "description": "The username of the user who created the endpoint.\n" + }, "dataSourceId": { "type": "string", "description": "ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint.\n" @@ -17616,6 +17978,13 @@ "type": "boolean", "description": "Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. 
To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.\n\n- **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup).\n\n- **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall).\n" }, + "healths": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/SqlEndpointHealth:SqlEndpointHealth" + }, + "description": "Health status of the endpoint.\n" + }, "instanceProfileArn": { "type": "string" }, @@ -17635,8 +18004,13 @@ "type": "string", "description": "Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. 
Default is `CHANNEL_NAME_CURRENT`.\n" }, + "numActiveSessions": { + "type": "integer", + "description": "The current number of active sessions for the endpoint.\n" + }, "numClusters": { - "type": "integer" + "type": "integer", + "description": "The current number of clusters used by the endpoint.\n" }, "odbcParams": { "$ref": "#/types/databricks:index/SqlEndpointOdbcParams:SqlEndpointOdbcParams", @@ -17647,7 +18021,8 @@ "description": "The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`.\n" }, "state": { - "type": "string" + "type": "string", + "description": "The current state of the endpoint.\n" }, "tags": { "$ref": "#/types/databricks:index/SqlEndpointTags:SqlEndpointTags", @@ -17655,7 +18030,7 @@ }, "warehouseType": { "type": "string", - "description": "SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`.\n" + "description": "SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`.\n" } }, "type": "object" @@ -18531,7 +18906,7 @@ } }, "databricks:index/storageCredential:StorageCredential": { - "description": "To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:\n\n- `databricks.StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential.\n- databricks.ExternalLocation are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nFor AWS\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst external = new databricks.StorageCredential(\"external\", {\n awsIamRole: {\n roleArn: aws_iam_role.external_data_access.arn,\n },\n comment: \"Managed by TF\",\n});\nconst externalCreds = new databricks.Grants(\"externalCreds\", {\n storageCredential: external.id,\n grants: [{\n principal: \"Data Engineers\",\n privileges: [\"CREATE_EXTERNAL_TABLE\"],\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nexternal = databricks.StorageCredential(\"external\",\n aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(\n role_arn=aws_iam_role[\"external_data_access\"][\"arn\"],\n ),\n comment=\"Managed by TF\")\nexternal_creds = databricks.Grants(\"externalCreds\",\n storage_credential=external.id,\n grants=[databricks.GrantsGrantArgs(\n principal=\"Data Engineers\",\n privileges=[\"CREATE_EXTERNAL_TABLE\"],\n )])\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var external = new Databricks.StorageCredential(\"external\", new()\n {\n AwsIamRole = new Databricks.Inputs.StorageCredentialAwsIamRoleArgs\n {\n RoleArn = aws_iam_role.External_data_access.Arn,\n },\n Comment = \"Managed by TF\",\n });\n\n var externalCreds = new Databricks.Grants(\"externalCreds\", new()\n {\n StorageCredential = external.Id,\n GrantDetails = new[]\n {\n new Databricks.Inputs.GrantsGrantArgs\n {\n Principal = \"Data Engineers\",\n Privileges = new[]\n {\n \"CREATE_EXTERNAL_TABLE\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texternal, err := databricks.NewStorageCredential(ctx, \"external\", \u0026databricks.StorageCredentialArgs{\n\t\t\tAwsIamRole: \u0026databricks.StorageCredentialAwsIamRoleArgs{\n\t\t\t\tRoleArn: pulumi.Any(aws_iam_role.External_data_access.Arn),\n\t\t\t},\n\t\t\tComment: pulumi.String(\"Managed by TF\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewGrants(ctx, \"externalCreds\", \u0026databricks.GrantsArgs{\n\t\t\tStorageCredential: external.ID(),\n\t\t\tGrants: databricks.GrantsGrantArray{\n\t\t\t\t\u0026databricks.GrantsGrantArgs{\n\t\t\t\t\tPrincipal: pulumi.String(\"Data Engineers\"),\n\t\t\t\t\tPrivileges: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"CREATE_EXTERNAL_TABLE\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport 
com.pulumi.databricks.inputs.StorageCredentialAwsIamRoleArgs;\nimport com.pulumi.databricks.Grants;\nimport com.pulumi.databricks.GrantsArgs;\nimport com.pulumi.databricks.inputs.GrantsGrantArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var external = new StorageCredential(\"external\", StorageCredentialArgs.builder() \n .awsIamRole(StorageCredentialAwsIamRoleArgs.builder()\n .roleArn(aws_iam_role.external_data_access().arn())\n .build())\n .comment(\"Managed by TF\")\n .build());\n\n var externalCreds = new Grants(\"externalCreds\", GrantsArgs.builder() \n .storageCredential(external.id())\n .grants(GrantsGrantArgs.builder()\n .principal(\"Data Engineers\")\n .privileges(\"CREATE_EXTERNAL_TABLE\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n external:\n type: databricks:StorageCredential\n properties:\n awsIamRole:\n roleArn: ${aws_iam_role.external_data_access.arn}\n comment: Managed by TF\n externalCreds:\n type: databricks:Grants\n properties:\n storageCredential: ${external.id}\n grants:\n - principal: Data Engineers\n privileges:\n - CREATE_EXTERNAL_TABLE\n```\n\nFor Azure\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst externalMi = new databricks.StorageCredential(\"externalMi\", {\n azureManagedIdentity: {\n accessConnectorId: azurerm_databricks_access_connector.example.id,\n },\n comment: \"Managed identity credential managed by TF\",\n});\nconst externalCreds = new databricks.Grants(\"externalCreds\", {\n storageCredential: databricks_storage_credential.external.id,\n grants: [{\n principal: \"Data Engineers\",\n privileges: [\"CREATE_EXTERNAL_TABLE\"],\n }],\n});\n```\n```python\nimport pulumi\nimport 
pulumi_databricks as databricks\n\nexternal_mi = databricks.StorageCredential(\"externalMi\",\n azure_managed_identity=databricks.StorageCredentialAzureManagedIdentityArgs(\n access_connector_id=azurerm_databricks_access_connector[\"example\"][\"id\"],\n ),\n comment=\"Managed identity credential managed by TF\")\nexternal_creds = databricks.Grants(\"externalCreds\",\n storage_credential=databricks_storage_credential[\"external\"][\"id\"],\n grants=[databricks.GrantsGrantArgs(\n principal=\"Data Engineers\",\n privileges=[\"CREATE_EXTERNAL_TABLE\"],\n )])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var externalMi = new Databricks.StorageCredential(\"externalMi\", new()\n {\n AzureManagedIdentity = new Databricks.Inputs.StorageCredentialAzureManagedIdentityArgs\n {\n AccessConnectorId = azurerm_databricks_access_connector.Example.Id,\n },\n Comment = \"Managed identity credential managed by TF\",\n });\n\n var externalCreds = new Databricks.Grants(\"externalCreds\", new()\n {\n StorageCredential = databricks_storage_credential.External.Id,\n GrantDetails = new[]\n {\n new Databricks.Inputs.GrantsGrantArgs\n {\n Principal = \"Data Engineers\",\n Privileges = new[]\n {\n \"CREATE_EXTERNAL_TABLE\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewStorageCredential(ctx, \"externalMi\", \u0026databricks.StorageCredentialArgs{\n\t\t\tAzureManagedIdentity: \u0026databricks.StorageCredentialAzureManagedIdentityArgs{\n\t\t\t\tAccessConnectorId: pulumi.Any(azurerm_databricks_access_connector.Example.Id),\n\t\t\t},\n\t\t\tComment: pulumi.String(\"Managed identity credential managed by TF\"),\n\t\t})\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewGrants(ctx, \"externalCreds\", \u0026databricks.GrantsArgs{\n\t\t\tStorageCredential: pulumi.Any(databricks_storage_credential.External.Id),\n\t\t\tGrants: databricks.GrantsGrantArray{\n\t\t\t\t\u0026databricks.GrantsGrantArgs{\n\t\t\t\t\tPrincipal: pulumi.String(\"Data Engineers\"),\n\t\t\t\t\tPrivileges: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"CREATE_EXTERNAL_TABLE\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialAzureManagedIdentityArgs;\nimport com.pulumi.databricks.Grants;\nimport com.pulumi.databricks.GrantsArgs;\nimport com.pulumi.databricks.inputs.GrantsGrantArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var externalMi = new StorageCredential(\"externalMi\", StorageCredentialArgs.builder() \n .azureManagedIdentity(StorageCredentialAzureManagedIdentityArgs.builder()\n .accessConnectorId(azurerm_databricks_access_connector.example().id())\n .build())\n .comment(\"Managed identity credential managed by TF\")\n .build());\n\n var externalCreds = new Grants(\"externalCreds\", GrantsArgs.builder() \n .storageCredential(databricks_storage_credential.external().id())\n .grants(GrantsGrantArgs.builder()\n .principal(\"Data Engineers\")\n .privileges(\"CREATE_EXTERNAL_TABLE\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n externalMi:\n type: databricks:StorageCredential\n properties:\n 
azureManagedIdentity:\n accessConnectorId: ${azurerm_databricks_access_connector.example.id}\n comment: Managed identity credential managed by TF\n externalCreds:\n type: databricks:Grants\n properties:\n storageCredential: ${databricks_storage_credential.external.id}\n grants:\n - principal: Data Engineers\n privileges:\n - CREATE_EXTERNAL_TABLE\n```\n\nFor GCP\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst external = new databricks.StorageCredential(\"external\", {databricksGcpServiceAccount: {}});\nconst externalCreds = new databricks.Grants(\"externalCreds\", {\n storageCredential: external.id,\n grants: [{\n principal: \"Data Engineers\",\n privileges: [\"CREATE_EXTERNAL_TABLE\"],\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nexternal = databricks.StorageCredential(\"external\", databricks_gcp_service_account=databricks.StorageCredentialDatabricksGcpServiceAccountArgs())\nexternal_creds = databricks.Grants(\"externalCreds\",\n storage_credential=external.id,\n grants=[databricks.GrantsGrantArgs(\n principal=\"Data Engineers\",\n privileges=[\"CREATE_EXTERNAL_TABLE\"],\n )])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var external = new Databricks.StorageCredential(\"external\", new()\n {\n DatabricksGcpServiceAccount = null,\n });\n\n var externalCreds = new Databricks.Grants(\"externalCreds\", new()\n {\n StorageCredential = external.Id,\n GrantDetails = new[]\n {\n new Databricks.Inputs.GrantsGrantArgs\n {\n Principal = \"Data Engineers\",\n Privileges = new[]\n {\n \"CREATE_EXTERNAL_TABLE\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx 
*pulumi.Context) error {\n\t\texternal, err := databricks.NewStorageCredential(ctx, \"external\", \u0026databricks.StorageCredentialArgs{\n\t\t\tDatabricksGcpServiceAccount: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewGrants(ctx, \"externalCreds\", \u0026databricks.GrantsArgs{\n\t\t\tStorageCredential: external.ID(),\n\t\t\tGrants: databricks.GrantsGrantArray{\n\t\t\t\t\u0026databricks.GrantsGrantArgs{\n\t\t\t\t\tPrincipal: pulumi.String(\"Data Engineers\"),\n\t\t\t\t\tPrivileges: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"CREATE_EXTERNAL_TABLE\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialDatabricksGcpServiceAccountArgs;\nimport com.pulumi.databricks.Grants;\nimport com.pulumi.databricks.GrantsArgs;\nimport com.pulumi.databricks.inputs.GrantsGrantArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var external = new StorageCredential(\"external\", StorageCredentialArgs.builder() \n .databricksGcpServiceAccount()\n .build());\n\n var externalCreds = new Grants(\"externalCreds\", GrantsArgs.builder() \n .storageCredential(external.id())\n .grants(GrantsGrantArgs.builder()\n .principal(\"Data Engineers\")\n .privileges(\"CREATE_EXTERNAL_TABLE\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n external:\n type: databricks:StorageCredential\n properties:\n databricksGcpServiceAccount: {}\n externalCreds:\n 
type: databricks:Grants\n properties:\n storageCredential: ${external.id}\n grants:\n - principal: Data Engineers\n privileges:\n - CREATE_EXTERNAL_TABLE\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by namebash\n\n```sh\n $ pulumi import databricks:index/storageCredential:StorageCredential this \u003cname\u003e\n```\n\n ", + "description": "\u003e **Note** This resource could be used with account or workspace-level provider.\n\nTo work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:\n\n- `databricks.StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential.\n- databricks.ExternalLocation are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nFor AWS\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst external = new databricks.StorageCredential(\"external\", {\n awsIamRole: {\n roleArn: aws_iam_role.external_data_access.arn,\n },\n comment: \"Managed by TF\",\n});\nconst externalCreds = new databricks.Grants(\"externalCreds\", {\n storageCredential: external.id,\n grants: [{\n principal: \"Data Engineers\",\n privileges: [\"CREATE_EXTERNAL_TABLE\"],\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nexternal = databricks.StorageCredential(\"external\",\n aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(\n role_arn=aws_iam_role[\"external_data_access\"][\"arn\"],\n ),\n comment=\"Managed by TF\")\nexternal_creds = databricks.Grants(\"externalCreds\",\n storage_credential=external.id,\n grants=[databricks.GrantsGrantArgs(\n principal=\"Data 
Engineers\",\n privileges=[\"CREATE_EXTERNAL_TABLE\"],\n )])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var external = new Databricks.StorageCredential(\"external\", new()\n {\n AwsIamRole = new Databricks.Inputs.StorageCredentialAwsIamRoleArgs\n {\n RoleArn = aws_iam_role.External_data_access.Arn,\n },\n Comment = \"Managed by TF\",\n });\n\n var externalCreds = new Databricks.Grants(\"externalCreds\", new()\n {\n StorageCredential = external.Id,\n GrantDetails = new[]\n {\n new Databricks.Inputs.GrantsGrantArgs\n {\n Principal = \"Data Engineers\",\n Privileges = new[]\n {\n \"CREATE_EXTERNAL_TABLE\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texternal, err := databricks.NewStorageCredential(ctx, \"external\", \u0026databricks.StorageCredentialArgs{\n\t\t\tAwsIamRole: \u0026databricks.StorageCredentialAwsIamRoleArgs{\n\t\t\t\tRoleArn: pulumi.Any(aws_iam_role.External_data_access.Arn),\n\t\t\t},\n\t\t\tComment: pulumi.String(\"Managed by TF\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewGrants(ctx, \"externalCreds\", \u0026databricks.GrantsArgs{\n\t\t\tStorageCredential: external.ID(),\n\t\t\tGrants: databricks.GrantsGrantArray{\n\t\t\t\t\u0026databricks.GrantsGrantArgs{\n\t\t\t\t\tPrincipal: pulumi.String(\"Data Engineers\"),\n\t\t\t\t\tPrivileges: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"CREATE_EXTERNAL_TABLE\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialAwsIamRoleArgs;\nimport com.pulumi.databricks.Grants;\nimport com.pulumi.databricks.GrantsArgs;\nimport com.pulumi.databricks.inputs.GrantsGrantArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var external = new StorageCredential(\"external\", StorageCredentialArgs.builder() \n .awsIamRole(StorageCredentialAwsIamRoleArgs.builder()\n .roleArn(aws_iam_role.external_data_access().arn())\n .build())\n .comment(\"Managed by TF\")\n .build());\n\n var externalCreds = new Grants(\"externalCreds\", GrantsArgs.builder() \n .storageCredential(external.id())\n .grants(GrantsGrantArgs.builder()\n .principal(\"Data Engineers\")\n .privileges(\"CREATE_EXTERNAL_TABLE\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n external:\n type: databricks:StorageCredential\n properties:\n awsIamRole:\n roleArn: ${aws_iam_role.external_data_access.arn}\n comment: Managed by TF\n externalCreds:\n type: databricks:Grants\n properties:\n storageCredential: ${external.id}\n grants:\n - principal: Data Engineers\n privileges:\n - CREATE_EXTERNAL_TABLE\n```\n\nFor Azure\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst externalMi = new databricks.StorageCredential(\"externalMi\", {\n azureManagedIdentity: {\n accessConnectorId: azurerm_databricks_access_connector.example.id,\n },\n comment: \"Managed identity credential managed by TF\",\n});\nconst externalCreds = new databricks.Grants(\"externalCreds\", {\n storageCredential: databricks_storage_credential.external.id,\n grants: [{\n principal: \"Data Engineers\",\n 
privileges: [\"CREATE_EXTERNAL_TABLE\"],\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nexternal_mi = databricks.StorageCredential(\"externalMi\",\n azure_managed_identity=databricks.StorageCredentialAzureManagedIdentityArgs(\n access_connector_id=azurerm_databricks_access_connector[\"example\"][\"id\"],\n ),\n comment=\"Managed identity credential managed by TF\")\nexternal_creds = databricks.Grants(\"externalCreds\",\n storage_credential=databricks_storage_credential[\"external\"][\"id\"],\n grants=[databricks.GrantsGrantArgs(\n principal=\"Data Engineers\",\n privileges=[\"CREATE_EXTERNAL_TABLE\"],\n )])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var externalMi = new Databricks.StorageCredential(\"externalMi\", new()\n {\n AzureManagedIdentity = new Databricks.Inputs.StorageCredentialAzureManagedIdentityArgs\n {\n AccessConnectorId = azurerm_databricks_access_connector.Example.Id,\n },\n Comment = \"Managed identity credential managed by TF\",\n });\n\n var externalCreds = new Databricks.Grants(\"externalCreds\", new()\n {\n StorageCredential = databricks_storage_credential.External.Id,\n GrantDetails = new[]\n {\n new Databricks.Inputs.GrantsGrantArgs\n {\n Principal = \"Data Engineers\",\n Privileges = new[]\n {\n \"CREATE_EXTERNAL_TABLE\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewStorageCredential(ctx, \"externalMi\", \u0026databricks.StorageCredentialArgs{\n\t\t\tAzureManagedIdentity: \u0026databricks.StorageCredentialAzureManagedIdentityArgs{\n\t\t\t\tAccessConnectorId: pulumi.Any(azurerm_databricks_access_connector.Example.Id),\n\t\t\t},\n\t\t\tComment: 
pulumi.String(\"Managed identity credential managed by TF\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewGrants(ctx, \"externalCreds\", \u0026databricks.GrantsArgs{\n\t\t\tStorageCredential: pulumi.Any(databricks_storage_credential.External.Id),\n\t\t\tGrants: databricks.GrantsGrantArray{\n\t\t\t\t\u0026databricks.GrantsGrantArgs{\n\t\t\t\t\tPrincipal: pulumi.String(\"Data Engineers\"),\n\t\t\t\t\tPrivileges: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"CREATE_EXTERNAL_TABLE\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialAzureManagedIdentityArgs;\nimport com.pulumi.databricks.Grants;\nimport com.pulumi.databricks.GrantsArgs;\nimport com.pulumi.databricks.inputs.GrantsGrantArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var externalMi = new StorageCredential(\"externalMi\", StorageCredentialArgs.builder() \n .azureManagedIdentity(StorageCredentialAzureManagedIdentityArgs.builder()\n .accessConnectorId(azurerm_databricks_access_connector.example().id())\n .build())\n .comment(\"Managed identity credential managed by TF\")\n .build());\n\n var externalCreds = new Grants(\"externalCreds\", GrantsArgs.builder() \n .storageCredential(databricks_storage_credential.external().id())\n .grants(GrantsGrantArgs.builder()\n .principal(\"Data Engineers\")\n .privileges(\"CREATE_EXTERNAL_TABLE\")\n .build())\n .build());\n\n 
}\n}\n```\n```yaml\nresources:\n externalMi:\n type: databricks:StorageCredential\n properties:\n azureManagedIdentity:\n accessConnectorId: ${azurerm_databricks_access_connector.example.id}\n comment: Managed identity credential managed by TF\n externalCreds:\n type: databricks:Grants\n properties:\n storageCredential: ${databricks_storage_credential.external.id}\n grants:\n - principal: Data Engineers\n privileges:\n - CREATE_EXTERNAL_TABLE\n```\n\nFor GCP\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst external = new databricks.StorageCredential(\"external\", {databricksGcpServiceAccount: {}});\nconst externalCreds = new databricks.Grants(\"externalCreds\", {\n storageCredential: external.id,\n grants: [{\n principal: \"Data Engineers\",\n privileges: [\"CREATE_EXTERNAL_TABLE\"],\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nexternal = databricks.StorageCredential(\"external\", databricks_gcp_service_account=databricks.StorageCredentialDatabricksGcpServiceAccountArgs())\nexternal_creds = databricks.Grants(\"externalCreds\",\n storage_credential=external.id,\n grants=[databricks.GrantsGrantArgs(\n principal=\"Data Engineers\",\n privileges=[\"CREATE_EXTERNAL_TABLE\"],\n )])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var external = new Databricks.StorageCredential(\"external\", new()\n {\n DatabricksGcpServiceAccount = null,\n });\n\n var externalCreds = new Databricks.Grants(\"externalCreds\", new()\n {\n StorageCredential = external.Id,\n GrantDetails = new[]\n {\n new Databricks.Inputs.GrantsGrantArgs\n {\n Principal = \"Data Engineers\",\n Privileges = new[]\n {\n \"CREATE_EXTERNAL_TABLE\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texternal, err := databricks.NewStorageCredential(ctx, \"external\", \u0026databricks.StorageCredentialArgs{\n\t\t\tDatabricksGcpServiceAccount: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewGrants(ctx, \"externalCreds\", \u0026databricks.GrantsArgs{\n\t\t\tStorageCredential: external.ID(),\n\t\t\tGrants: databricks.GrantsGrantArray{\n\t\t\t\t\u0026databricks.GrantsGrantArgs{\n\t\t\t\t\tPrincipal: pulumi.String(\"Data Engineers\"),\n\t\t\t\t\tPrivileges: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"CREATE_EXTERNAL_TABLE\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialDatabricksGcpServiceAccountArgs;\nimport com.pulumi.databricks.Grants;\nimport com.pulumi.databricks.GrantsArgs;\nimport com.pulumi.databricks.inputs.GrantsGrantArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var external = new StorageCredential(\"external\", StorageCredentialArgs.builder() \n .databricksGcpServiceAccount()\n .build());\n\n var externalCreds = new Grants(\"externalCreds\", GrantsArgs.builder() \n .storageCredential(external.id())\n .grants(GrantsGrantArgs.builder()\n .principal(\"Data Engineers\")\n .privileges(\"CREATE_EXTERNAL_TABLE\")\n .build())\n 
.build());\n\n }\n}\n```\n```yaml\nresources:\n external:\n type: databricks:StorageCredential\n properties:\n databricksGcpServiceAccount: {}\n externalCreds:\n type: databricks:Grants\n properties:\n storageCredential: ${external.id}\n grants:\n - principal: Data Engineers\n privileges:\n - CREATE_EXTERNAL_TABLE\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by namebash\n\n```sh\n $ pulumi import databricks:index/storageCredential:StorageCredential this \u003cname\u003e\n```\n\n ", "properties": { "awsIamRole": { "$ref": "#/types/databricks:index/StorageCredentialAwsIamRole:StorageCredentialAwsIamRole" @@ -18573,6 +18948,10 @@ "readOnly": { "type": "boolean", "description": "Indicates whether the storage credential is only usable for read operations.\n" + }, + "skipValidation": { + "type": "boolean", + "description": "Suppress validation errors if any \u0026 force save the storage credential.\n" } }, "required": [ @@ -18623,6 +19002,10 @@ "readOnly": { "type": "boolean", "description": "Indicates whether the storage credential is only usable for read operations.\n" + }, + "skipValidation": { + "type": "boolean", + "description": "Suppress validation errors if any \u0026 force save the storage credential.\n" } }, "stateInputs": { @@ -18669,13 +19052,17 @@ "readOnly": { "type": "boolean", "description": "Indicates whether the storage credential is only usable for read operations.\n" + }, + "skipValidation": { + "type": "boolean", + "description": "Suppress validation errors if any \u0026 force save the storage credential.\n" } }, "type": "object" } }, "databricks:index/systemSchema:SystemSchema": { - "description": "\u003e **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).\n\n\u003e **Notes**\n Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future.\n\nManages system tables enablement. 
System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nEnable the system schema `access`\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.SystemSchema(\"this\", {schema: \"access\"});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.SystemSchema(\"this\", schema=\"access\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.SystemSchema(\"this\", new()\n {\n Schema = \"access\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewSystemSchema(ctx, \"this\", \u0026databricks.SystemSchemaArgs{\n\t\t\tSchema: pulumi.String(\"access\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.SystemSchema;\nimport com.pulumi.databricks.SystemSchemaArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new SystemSchema(\"this\", SystemSchemaArgs.builder() \n .schema(\"access\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n 
this:\n type: databricks:SystemSchema\n properties:\n schema: access\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by the metastore id and schema name bash\n\n```sh\n $ pulumi import databricks:index/systemSchema:SystemSchema this \u003cmetastore_id\u003e|\u003cschema_name\u003e\n```\n\n ", + "description": "\u003e **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).\n\n\u003e **Note** This resource could be only used with workspace-level provider!\n\nManages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nEnable the system schema `access`\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst _this = new databricks.SystemSchema(\"this\", {schema: \"access\"});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.SystemSchema(\"this\", schema=\"access\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = new Databricks.SystemSchema(\"this\", new()\n {\n Schema = \"access\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewSystemSchema(ctx, \"this\", \u0026databricks.SystemSchemaArgs{\n\t\t\tSchema: pulumi.String(\"access\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport 
com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.SystemSchema;\nimport com.pulumi.databricks.SystemSchemaArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var this_ = new SystemSchema(\"this\", SystemSchemaArgs.builder() \n .schema(\"access\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n this:\n type: databricks:SystemSchema\n properties:\n schema: access\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by the metastore id and schema name bash\n\n```sh\n $ pulumi import databricks:index/systemSchema:SystemSchema this \u003cmetastore_id\u003e|\u003cschema_name\u003e\n```\n\n ", "properties": { "metastoreId": { "type": "string" @@ -19266,7 +19653,7 @@ } }, "databricks:index/volume:Volume": { - "description": "\u003e **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).\n\nVolumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data.\n\nA volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog.\n\nA volume can be **managed** or **external**.\n\nA **managed volume** is a Unity Catalog-governed storage volume created within the default storage location of the containing schema. 
Managed volumes allow the creation of governed storage for working with files without the overhead of external locations and storage credentials. You do not need to specify a location when creating a managed volume, and all file access for data in managed volumes is through paths managed by Unity Catalog.\n\nAn **external volume** is a Unity Catalog-governed storage volume registered against a directory within an external location.\n\nA volume can be referenced using its identifier: ```\u003ccatalogName\u003e.\u003cschemaName\u003e.\u003cvolumeName\u003e```, where:\n\n* ```\u003ccatalogName\u003e```: The name of the catalog containing the Volume.\n* ```\u003cschemaName\u003e```: The name of the schema containing the Volume.\n* ```\u003cvolumeName\u003e```: The name of the Volume. It identifies the volume object.\n\nThe path to access files in volumes uses the following format:\n\n\nThis resource manages Volumes in Unity Catalog.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst sandbox = new databricks.Catalog(\"sandbox\", {\n comment: \"this catalog is managed by terraform\",\n properties: {\n purpose: \"testing\",\n },\n});\nconst things = new databricks.Schema(\"things\", {\n catalogName: sandbox.name,\n comment: \"this schema is managed by terraform\",\n properties: {\n kind: \"various\",\n },\n});\nconst external = new databricks.StorageCredential(\"external\", {awsIamRole: {\n roleArn: aws_iam_role.external_data_access.arn,\n}});\nconst some = new databricks.ExternalLocation(\"some\", {\n url: `s3://${aws_s3_bucket.external.id}/some`,\n credentialName: external.name,\n});\nconst _this = new databricks.Volume(\"this\", {\n catalogName: sandbox.name,\n schemaName: things.name,\n volumeType: \"EXTERNAL\",\n storageLocation: some.url,\n comment: \"this volume is managed by terraform\",\n});\n```\n```python\nimport pulumi\nimport 
pulumi_databricks as databricks\n\nsandbox = databricks.Catalog(\"sandbox\",\n comment=\"this catalog is managed by terraform\",\n properties={\n \"purpose\": \"testing\",\n })\nthings = databricks.Schema(\"things\",\n catalog_name=sandbox.name,\n comment=\"this schema is managed by terraform\",\n properties={\n \"kind\": \"various\",\n })\nexternal = databricks.StorageCredential(\"external\", aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(\n role_arn=aws_iam_role[\"external_data_access\"][\"arn\"],\n))\nsome = databricks.ExternalLocation(\"some\",\n url=f\"s3://{aws_s3_bucket['external']['id']}/some\",\n credential_name=external.name)\nthis = databricks.Volume(\"this\",\n catalog_name=sandbox.name,\n schema_name=things.name,\n volume_type=\"EXTERNAL\",\n storage_location=some.url,\n comment=\"this volume is managed by terraform\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var sandbox = new Databricks.Catalog(\"sandbox\", new()\n {\n Comment = \"this catalog is managed by terraform\",\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n var things = new Databricks.Schema(\"things\", new()\n {\n CatalogName = sandbox.Name,\n Comment = \"this schema is managed by terraform\",\n Properties = \n {\n { \"kind\", \"various\" },\n },\n });\n\n var external = new Databricks.StorageCredential(\"external\", new()\n {\n AwsIamRole = new Databricks.Inputs.StorageCredentialAwsIamRoleArgs\n {\n RoleArn = aws_iam_role.External_data_access.Arn,\n },\n });\n\n var some = new Databricks.ExternalLocation(\"some\", new()\n {\n Url = $\"s3://{aws_s3_bucket.External.Id}/some\",\n CredentialName = external.Name,\n });\n\n var @this = new Databricks.Volume(\"this\", new()\n {\n CatalogName = sandbox.Name,\n SchemaName = things.Name,\n VolumeType = \"EXTERNAL\",\n StorageLocation = some.Url,\n Comment = \"this volume is managed by 
terraform\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsandbox, err := databricks.NewCatalog(ctx, \"sandbox\", \u0026databricks.CatalogArgs{\n\t\t\tComment: pulumi.String(\"this catalog is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tthings, err := databricks.NewSchema(ctx, \"things\", \u0026databricks.SchemaArgs{\n\t\t\tCatalogName: sandbox.Name,\n\t\t\tComment: pulumi.String(\"this schema is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"kind\": pulumi.Any(\"various\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texternal, err := databricks.NewStorageCredential(ctx, \"external\", \u0026databricks.StorageCredentialArgs{\n\t\t\tAwsIamRole: \u0026databricks.StorageCredentialAwsIamRoleArgs{\n\t\t\t\tRoleArn: pulumi.Any(aws_iam_role.External_data_access.Arn),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsome, err := databricks.NewExternalLocation(ctx, \"some\", \u0026databricks.ExternalLocationArgs{\n\t\t\tUrl: pulumi.String(fmt.Sprintf(\"s3://%v/some\", aws_s3_bucket.External.Id)),\n\t\t\tCredentialName: external.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewVolume(ctx, \"this\", \u0026databricks.VolumeArgs{\n\t\t\tCatalogName: sandbox.Name,\n\t\t\tSchemaName: things.Name,\n\t\t\tVolumeType: pulumi.String(\"EXTERNAL\"),\n\t\t\tStorageLocation: some.Url,\n\t\t\tComment: pulumi.String(\"this volume is managed by terraform\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport 
com.pulumi.core.Output;\nimport com.pulumi.databricks.Catalog;\nimport com.pulumi.databricks.CatalogArgs;\nimport com.pulumi.databricks.Schema;\nimport com.pulumi.databricks.SchemaArgs;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialAwsIamRoleArgs;\nimport com.pulumi.databricks.ExternalLocation;\nimport com.pulumi.databricks.ExternalLocationArgs;\nimport com.pulumi.databricks.Volume;\nimport com.pulumi.databricks.VolumeArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var sandbox = new Catalog(\"sandbox\", CatalogArgs.builder() \n .comment(\"this catalog is managed by terraform\")\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n var things = new Schema(\"things\", SchemaArgs.builder() \n .catalogName(sandbox.name())\n .comment(\"this schema is managed by terraform\")\n .properties(Map.of(\"kind\", \"various\"))\n .build());\n\n var external = new StorageCredential(\"external\", StorageCredentialArgs.builder() \n .awsIamRole(StorageCredentialAwsIamRoleArgs.builder()\n .roleArn(aws_iam_role.external_data_access().arn())\n .build())\n .build());\n\n var some = new ExternalLocation(\"some\", ExternalLocationArgs.builder() \n .url(String.format(\"s3://%s/some\", aws_s3_bucket.external().id()))\n .credentialName(external.name())\n .build());\n\n var this_ = new Volume(\"this\", VolumeArgs.builder() \n .catalogName(sandbox.name())\n .schemaName(things.name())\n .volumeType(\"EXTERNAL\")\n .storageLocation(some.url())\n .comment(\"this volume is managed by terraform\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n sandbox:\n type: databricks:Catalog\n properties:\n comment: this catalog is managed 
by terraform\n properties:\n purpose: testing\n things:\n type: databricks:Schema\n properties:\n catalogName: ${sandbox.name}\n comment: this schema is managed by terraform\n properties:\n kind: various\n external:\n type: databricks:StorageCredential\n properties:\n awsIamRole:\n roleArn: ${aws_iam_role.external_data_access.arn}\n some:\n type: databricks:ExternalLocation\n properties:\n url: s3://${aws_s3_bucket.external.id}/some\n credentialName: ${external.name}\n this:\n type: databricks:Volume\n properties:\n catalogName: ${sandbox.name}\n schemaName: ${things.name}\n volumeType: EXTERNAL\n storageLocation: ${some.url}\n comment: this volume is managed by terraform\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by `full_name` which is the 3-level Volume identifier`\u003ccatalog\u003e.\u003cschema\u003e.\u003cname\u003e` bash\n\n```sh\n $ pulumi import databricks:index/volume:Volume this \u003ccatalog_name\u003e.\u003cschema_name\u003e.\u003cname\u003e\n```\n\n ", + "description": "\u003e **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).\n\n\u003e **Note** This resource could be only used with workspace-level provider!\n\nVolumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data.\n\nA volume resides in the third layer of Unity Catalog’s three-level namespace. 
Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog.\n\nA volume can be **managed** or **external**.\n\nA **managed volume** is a Unity Catalog-governed storage volume created within the default storage location of the containing schema. Managed volumes allow the creation of governed storage for working with files without the overhead of external locations and storage credentials. You do not need to specify a location when creating a managed volume, and all file access for data in managed volumes is through paths managed by Unity Catalog.\n\nAn **external volume** is a Unity Catalog-governed storage volume registered against a directory within an external location.\n\nA volume can be referenced using its identifier: ```\u003ccatalogName\u003e.\u003cschemaName\u003e.\u003cvolumeName\u003e```, where:\n\n* ```\u003ccatalogName\u003e```: The name of the catalog containing the Volume.\n* ```\u003cschemaName\u003e```: The name of the schema containing the Volume.\n* ```\u003cvolumeName\u003e```: The name of the Volume. 
It identifies the volume object.\n\nThe path to access files in volumes uses the following format:\n\n\nThis resource manages Volumes in Unity Catalog.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst sandbox = new databricks.Catalog(\"sandbox\", {\n comment: \"this catalog is managed by terraform\",\n properties: {\n purpose: \"testing\",\n },\n});\nconst things = new databricks.Schema(\"things\", {\n catalogName: sandbox.name,\n comment: \"this schema is managed by terraform\",\n properties: {\n kind: \"various\",\n },\n});\nconst external = new databricks.StorageCredential(\"external\", {awsIamRole: {\n roleArn: aws_iam_role.external_data_access.arn,\n}});\nconst some = new databricks.ExternalLocation(\"some\", {\n url: `s3://${aws_s3_bucket.external.id}/some`,\n credentialName: external.name,\n});\nconst _this = new databricks.Volume(\"this\", {\n catalogName: sandbox.name,\n schemaName: things.name,\n volumeType: \"EXTERNAL\",\n storageLocation: some.url,\n comment: \"this volume is managed by terraform\",\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nsandbox = databricks.Catalog(\"sandbox\",\n comment=\"this catalog is managed by terraform\",\n properties={\n \"purpose\": \"testing\",\n })\nthings = databricks.Schema(\"things\",\n catalog_name=sandbox.name,\n comment=\"this schema is managed by terraform\",\n properties={\n \"kind\": \"various\",\n })\nexternal = databricks.StorageCredential(\"external\", aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(\n role_arn=aws_iam_role[\"external_data_access\"][\"arn\"],\n))\nsome = databricks.ExternalLocation(\"some\",\n url=f\"s3://{aws_s3_bucket['external']['id']}/some\",\n credential_name=external.name)\nthis = databricks.Volume(\"this\",\n catalog_name=sandbox.name,\n schema_name=things.name,\n volume_type=\"EXTERNAL\",\n 
storage_location=some.url,\n comment=\"this volume is managed by terraform\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var sandbox = new Databricks.Catalog(\"sandbox\", new()\n {\n Comment = \"this catalog is managed by terraform\",\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n var things = new Databricks.Schema(\"things\", new()\n {\n CatalogName = sandbox.Name,\n Comment = \"this schema is managed by terraform\",\n Properties = \n {\n { \"kind\", \"various\" },\n },\n });\n\n var external = new Databricks.StorageCredential(\"external\", new()\n {\n AwsIamRole = new Databricks.Inputs.StorageCredentialAwsIamRoleArgs\n {\n RoleArn = aws_iam_role.External_data_access.Arn,\n },\n });\n\n var some = new Databricks.ExternalLocation(\"some\", new()\n {\n Url = $\"s3://{aws_s3_bucket.External.Id}/some\",\n CredentialName = external.Name,\n });\n\n var @this = new Databricks.Volume(\"this\", new()\n {\n CatalogName = sandbox.Name,\n SchemaName = things.Name,\n VolumeType = \"EXTERNAL\",\n StorageLocation = some.Url,\n Comment = \"this volume is managed by terraform\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsandbox, err := databricks.NewCatalog(ctx, \"sandbox\", \u0026databricks.CatalogArgs{\n\t\t\tComment: pulumi.String(\"this catalog is managed by terraform\"),\n\t\t\tProperties: pulumi.Map{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tthings, err := databricks.NewSchema(ctx, \"things\", \u0026databricks.SchemaArgs{\n\t\t\tCatalogName: sandbox.Name,\n\t\t\tComment: pulumi.String(\"this schema is managed by terraform\"),\n\t\t\tProperties: 
pulumi.Map{\n\t\t\t\t\"kind\": pulumi.Any(\"various\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texternal, err := databricks.NewStorageCredential(ctx, \"external\", \u0026databricks.StorageCredentialArgs{\n\t\t\tAwsIamRole: \u0026databricks.StorageCredentialAwsIamRoleArgs{\n\t\t\t\tRoleArn: pulumi.Any(aws_iam_role.External_data_access.Arn),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsome, err := databricks.NewExternalLocation(ctx, \"some\", \u0026databricks.ExternalLocationArgs{\n\t\t\tUrl: pulumi.String(fmt.Sprintf(\"s3://%v/some\", aws_s3_bucket.External.Id)),\n\t\t\tCredentialName: external.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewVolume(ctx, \"this\", \u0026databricks.VolumeArgs{\n\t\t\tCatalogName: sandbox.Name,\n\t\t\tSchemaName: things.Name,\n\t\t\tVolumeType: pulumi.String(\"EXTERNAL\"),\n\t\t\tStorageLocation: some.Url,\n\t\t\tComment: pulumi.String(\"this volume is managed by terraform\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Catalog;\nimport com.pulumi.databricks.CatalogArgs;\nimport com.pulumi.databricks.Schema;\nimport com.pulumi.databricks.SchemaArgs;\nimport com.pulumi.databricks.StorageCredential;\nimport com.pulumi.databricks.StorageCredentialArgs;\nimport com.pulumi.databricks.inputs.StorageCredentialAwsIamRoleArgs;\nimport com.pulumi.databricks.ExternalLocation;\nimport com.pulumi.databricks.ExternalLocationArgs;\nimport com.pulumi.databricks.Volume;\nimport com.pulumi.databricks.VolumeArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public 
static void stack(Context ctx) {\n var sandbox = new Catalog(\"sandbox\", CatalogArgs.builder() \n .comment(\"this catalog is managed by terraform\")\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n var things = new Schema(\"things\", SchemaArgs.builder() \n .catalogName(sandbox.name())\n .comment(\"this schema is managed by terraform\")\n .properties(Map.of(\"kind\", \"various\"))\n .build());\n\n var external = new StorageCredential(\"external\", StorageCredentialArgs.builder() \n .awsIamRole(StorageCredentialAwsIamRoleArgs.builder()\n .roleArn(aws_iam_role.external_data_access().arn())\n .build())\n .build());\n\n var some = new ExternalLocation(\"some\", ExternalLocationArgs.builder() \n .url(String.format(\"s3://%s/some\", aws_s3_bucket.external().id()))\n .credentialName(external.name())\n .build());\n\n var this_ = new Volume(\"this\", VolumeArgs.builder() \n .catalogName(sandbox.name())\n .schemaName(things.name())\n .volumeType(\"EXTERNAL\")\n .storageLocation(some.url())\n .comment(\"this volume is managed by terraform\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n sandbox:\n type: databricks:Catalog\n properties:\n comment: this catalog is managed by terraform\n properties:\n purpose: testing\n things:\n type: databricks:Schema\n properties:\n catalogName: ${sandbox.name}\n comment: this schema is managed by terraform\n properties:\n kind: various\n external:\n type: databricks:StorageCredential\n properties:\n awsIamRole:\n roleArn: ${aws_iam_role.external_data_access.arn}\n some:\n type: databricks:ExternalLocation\n properties:\n url: s3://${aws_s3_bucket.external.id}/some\n credentialName: ${external.name}\n this:\n type: databricks:Volume\n properties:\n catalogName: ${sandbox.name}\n schemaName: ${things.name}\n volumeType: EXTERNAL\n storageLocation: ${some.url}\n comment: this volume is managed by terraform\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by `full_name` which is the 
3-level Volume identifier`\u003ccatalog\u003e.\u003cschema\u003e.\u003cname\u003e` bash\n\n```sh\n $ pulumi import databricks:index/volume:Volume this \u003ccatalog_name\u003e.\u003cschema_name\u003e.\u003cname\u003e\n```\n\n ", "properties": { "catalogName": { "type": "string", @@ -19932,6 +20319,41 @@ ] } }, + "databricks:index/getCurrentMetastore:getCurrentMetastore": { + "description": "Retrieves information about metastore attached to a given workspace.\n\n\u003e **Note** This is the workspace-level data source.\n\n\u003e **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\nMetastoreSummary response for a metastore attached to the current workspace.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst this = databricks.getCurrentMetastore({});\nexport const someMetastore = data.databricks_metastore[\"this\"].metastore_info[0];\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis = databricks.get_current_metastore()\npulumi.export(\"someMetastore\", data[\"databricks_metastore\"][\"this\"][\"metastore_info\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @this = Databricks.GetCurrentMetastore.Invoke();\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"someMetastore\"] = data.Databricks_metastore.This.Metastore_info[0],\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.GetCurrentMetastore(ctx, nil, nil)\n\t\tif 
err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"someMetastore\", data.Databricks_metastore.This.Metastore_info[0])\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetCurrentMetastoreArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var this = DatabricksFunctions.getCurrentMetastore();\n\n ctx.export(\"someMetastore\", data.databricks_metastore().this().metastore_info()[0]);\n }\n}\n```\n```yaml\nvariables:\n this:\n fn::invoke:\n Function: databricks:getCurrentMetastore\n Arguments: {}\noutputs:\n someMetastore: ${data.databricks_metastore.this.metastore_info[0]}\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* databricks.Metastore to get information for a metastore with a given ID.\n* databricks.getMetastores to get a mapping of name to id of all metastores.\n* databricks.Metastore to manage Metastores within Unity Catalog.\n* databricks.Catalog to manage catalogs within Unity Catalog.\n", + "inputs": { + "description": "A collection of arguments for invoking getCurrentMetastore.\n", + "properties": { + "id": { + "type": "string", + "description": "metastore ID.\n" + }, + "metastoreInfo": { + "$ref": "#/types/databricks:index/getCurrentMetastoreMetastoreInfo:getCurrentMetastoreMetastoreInfo", + "description": "summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details):\n" + } + }, + "type": "object" + }, + "outputs": { + "description": "A collection of values returned by getCurrentMetastore.\n", + "properties": { + "id": { + "type": "string", + "description": "metastore ID.\n" + }, + "metastoreInfo": { + "$ref": "#/types/databricks:index/getCurrentMetastoreMetastoreInfo:getCurrentMetastoreMetastoreInfo", + "description": "summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details):\n" + } + }, + "type": "object", + "required": [ + "id", + "metastoreInfo" + ] + } + }, "databricks:index/getCurrentUser:getCurrentUser": { "description": "## Exported attributes\n\nData source exposes the following attributes:\n\n* `id` - The id of the calling user.\n* `external_id` - ID of the user in an external identity provider.\n* `user_name` - Name of the user, e.g. `mr.foo@example.com`. If the currently logged-in identity is a service principal, returns the application ID, e.g. `11111111-2222-3333-4444-555666777888`\n* `home` - Home folder of the user, e.g. `/Users/mr.foo@example.com`.\n* `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`.\n* `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`.\n* `workspace_url` - URL of the current Databricks workspace.\n* `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. 
`users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal.\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* End to end workspace management guide\n* databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html).\n* databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html).\n* databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html).\n", "outputs": { @@ -20115,12 +20537,17 @@ }, "path": { "type": "string" + }, + "workspacePath": { + "type": "string", + "description": "path on Workspace File System (WSFS) in form of `/Workspace` + `path`\n" } }, "type": "object", "required": [ "objectId", "path", + "workspacePath", "id" ] } @@ -21075,7 +21502,7 @@ }, "displayName": { "type": "string", - "description": "Display name of the service principal, e.g. `Foo SPN`.\n" + "description": "Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown.\n" }, "externalId": { "type": "string", @@ -21410,6 +21837,10 @@ "type": "string", "description": "The size of the clusters allocated to the warehouse: \"2X-Small\", \"X-Small\", \"Small\", \"Medium\", \"Large\", \"X-Large\", \"2X-Large\", \"3X-Large\", \"4X-Large\".\n" }, + "creatorName": { + "type": "string", + "description": "The username of the user who created the endpoint.\n" + }, "dataSourceId": { "type": "string", "description": "ID of the data source for this warehouse. 
This is used to bind an Databricks SQL query to an warehouse.\n" @@ -21422,6 +21853,10 @@ "type": "boolean", "description": "Whether this SQL warehouse is a serverless SQL warehouse.\n" }, + "health": { + "$ref": "#/types/databricks:index/getSqlWarehouseHealth:getSqlWarehouseHealth", + "description": "Health status of the endpoint.\n" + }, "id": { "type": "string", "description": "The ID of the SQL warehouse.\n" @@ -21445,8 +21880,13 @@ "type": "string", "description": "Name of the SQL warehouse to search (case-sensitive).\n" }, + "numActiveSessions": { + "type": "integer", + "description": "The current number of clusters used by the endpoint.\n" + }, "numClusters": { - "type": "integer" + "type": "integer", + "description": "The current number of clusters used by the endpoint.\n" }, "odbcParams": { "$ref": "#/types/databricks:index/getSqlWarehouseOdbcParams:getSqlWarehouseOdbcParams", @@ -21457,11 +21897,16 @@ "description": "The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`.\n" }, "state": { - "type": "string" + "type": "string", + "description": "The current state of the endpoint.\n" }, "tags": { "$ref": "#/types/databricks:index/getSqlWarehouseTags:getSqlWarehouseTags", "description": "tags used for SQL warehouse resources.\n" + }, + "warehouseType": { + "type": "string", + "description": "SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types).\n" } }, "type": "object" @@ -21481,6 +21926,10 @@ "type": "string", "description": "The size of the clusters allocated to the warehouse: \"2X-Small\", \"X-Small\", \"Small\", \"Medium\", \"Large\", \"X-Large\", \"2X-Large\", \"3X-Large\", \"4X-Large\".\n" }, + "creatorName": { + "type": "string", + "description": "The username of the user who created the endpoint.\n" + }, "dataSourceId": { "type": "string", "description": "ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse.\n" @@ -21493,6 +21942,10 @@ "type": "boolean", "description": "Whether this SQL warehouse is a serverless SQL warehouse.\n" }, + "health": { + "$ref": "#/types/databricks:index/getSqlWarehouseHealth:getSqlWarehouseHealth", + "description": "Health status of the endpoint.\n" + }, "id": { "type": "string", "description": "The ID of the SQL warehouse.\n" @@ -21516,8 +21969,13 @@ "type": "string", "description": "Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. 
Default is `CHANNEL_NAME_CURRENT`.\n" }, + "numActiveSessions": { + "type": "integer", + "description": "The current number of clusters used by the endpoint.\n" + }, "numClusters": { - "type": "integer" + "type": "integer", + "description": "The current number of clusters used by the endpoint.\n" }, "odbcParams": { "$ref": "#/types/databricks:index/getSqlWarehouseOdbcParams:getSqlWarehouseOdbcParams", @@ -21528,11 +21986,16 @@ "description": "The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`.\n" }, "state": { - "type": "string" + "type": "string", + "description": "The current state of the endpoint.\n" }, "tags": { "$ref": "#/types/databricks:index/getSqlWarehouseTags:getSqlWarehouseTags", "description": "tags used for SQL warehouse resources.\n" + }, + "warehouseType": { + "type": "string", + "description": "SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types).\n" } }, "type": "object", @@ -21540,20 +22003,24 @@ "autoStopMins", "channel", "clusterSize", + "creatorName", "dataSourceId", "enablePhoton", "enableServerlessCompute", + "health", "id", "instanceProfileArn", "jdbcUrl", "maxNumClusters", "minNumClusters", "name", + "numActiveSessions", "numClusters", "odbcParams", "spotInstancePolicy", "state", - "tags" + "tags", + "warehouseType" ] } }, diff --git a/provider/go.mod b/provider/go.mod index 365310d0..dafd03da 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -5,8 +5,8 @@ go 1.21 replace github.com/hashicorp/terraform-plugin-sdk/v2 => github.com/pulumi/terraform-plugin-sdk/v2 v2.0.0-20230912190043-e6d96b3b8f7e require ( - github.com/databricks/databricks-sdk-go v0.28.1 - github.com/databricks/terraform-provider-databricks v1.33.0 + github.com/databricks/databricks-sdk-go v0.29.0 + github.com/databricks/terraform-provider-databricks v1.34.0 github.com/pulumi/pulumi-terraform-bridge/v3 
v3.70.0 ) diff --git a/provider/go.sum b/provider/go.sum index 326cdf41..2e20e36e 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -1148,10 +1148,10 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/databricks/databricks-sdk-go v0.28.1 h1:RH5sPSnzQjZ0x3yWP8+omuBXKLMI2P1lJO8B7BJVhQs= -github.com/databricks/databricks-sdk-go v0.28.1/go.mod h1:AGzQDmVUcf/J9ARx2FgObcRI5RO2VZ1jehhxFM6tA60= -github.com/databricks/terraform-provider-databricks v1.33.0 h1:+oQBnzq1oIuZBklWQadnApQSEYuiZ3Uz8fFgSnczDgk= -github.com/databricks/terraform-provider-databricks v1.33.0/go.mod h1:+xN71Tidx8HxKi0KfUaF3vuoinvTxhvUQLEac664p28= +github.com/databricks/databricks-sdk-go v0.29.0 h1:p53y3IvYjNvWve3ALXdsJx67RPk/M4rt0JBgweq5s2Y= +github.com/databricks/databricks-sdk-go v0.29.0/go.mod h1:4Iy1e1XZiMC15BfWMQVrtr6va8wSEkiUXv0ZRMfgo3w= +github.com/databricks/terraform-provider-databricks v1.34.0 h1:EFVGTHivjCMUFtbhza+JSiQpC6SvMmiVL5D4uDbUK34= +github.com/databricks/terraform-provider-databricks v1.34.0/go.mod h1:1/6tWlnHIAsJDjwGI4ASDdLJdq4Hxh/KH+ic9gwELLs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/sdk/dotnet/AccessControlRuleSet.cs b/sdk/dotnet/AccessControlRuleSet.cs index d7bd1e78..37f145a4 100644 --- a/sdk/dotnet/AccessControlRuleSet.cs +++ b/sdk/dotnet/AccessControlRuleSet.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be used with account or 
workspace-level provider. + /// /// This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. /// /// > **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`. diff --git a/sdk/dotnet/Connection.cs b/sdk/dotnet/Connection.cs index 06a07917..576546f5 100644 --- a/sdk/dotnet/Connection.cs +++ b/sdk/dotnet/Connection.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: /// /// - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. diff --git a/sdk/dotnet/DefaultNamespaceSetting.cs b/sdk/dotnet/DefaultNamespaceSetting.cs index c6031a5b..e2c9e187 100644 --- a/sdk/dotnet/DefaultNamespaceSetting.cs +++ b/sdk/dotnet/DefaultNamespaceSetting.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// The `databricks.DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. /// Setting the default catalog for the workspace determines the catalog that is used when queries do not reference /// a fully qualified 3 level name. 
For example, if the default catalog is set to 'retail_prod' then a query diff --git a/sdk/dotnet/Directory.cs b/sdk/dotnet/Directory.cs index fc632144..8e920860 100644 --- a/sdk/dotnet/Directory.cs +++ b/sdk/dotnet/Directory.cs @@ -36,6 +36,12 @@ public partial class Directory : global::Pulumi.CustomResource [Output("path")] public Output Path { get; private set; } = null!; + /// + /// path on Workspace File System (WSFS) in form of `/Workspace` + `path` + /// + [Output("workspacePath")] + public Output WorkspacePath { get; private set; } = null!; + /// /// Create a Directory resource with the given unique name, arguments, and options. @@ -120,6 +126,12 @@ public sealed class DirectoryState : global::Pulumi.ResourceArgs [Input("path")] public Input? Path { get; set; } + /// + /// path on Workspace File System (WSFS) in form of `/Workspace` + `path` + /// + [Input("workspacePath")] + public Input? WorkspacePath { get; set; } + public DirectoryState() { } diff --git a/sdk/dotnet/ExternalLocation.cs b/sdk/dotnet/ExternalLocation.cs index 52d5119b..ac01d37b 100644 --- a/sdk/dotnet/ExternalLocation.cs +++ b/sdk/dotnet/ExternalLocation.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: /// /// - databricks.StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. diff --git a/sdk/dotnet/GetCurrentMetastore.cs b/sdk/dotnet/GetCurrentMetastore.cs new file mode 100644 index 00000000..a202ac09 --- /dev/null +++ b/sdk/dotnet/GetCurrentMetastore.cs @@ -0,0 +1,165 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks +{ + public static class GetCurrentMetastore + { + /// + /// Retrieves information about metastore attached to a given workspace. + /// + /// > **Note** This is the workspace-level data source. + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + /// + /// {{% examples %}} + /// ## Example Usage + /// {{% example %}} + /// + /// MetastoreSummary response for a metastore attached to the current workspace. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetCurrentMetastore.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["someMetastore"] = data.Databricks_metastore.This.Metastore_info[0], + /// }; + /// }); + /// ``` + /// {{% /example %}} + /// {{% /examples %}} + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Metastore to get information for a metastore with a given ID. + /// * databricks.getMetastores to get a mapping of name to id of all metastores. + /// * databricks.Metastore to manage Metastores within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Task InvokeAsync(GetCurrentMetastoreArgs? args = null, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("databricks:index/getCurrentMetastore:getCurrentMetastore", args ?? 
new GetCurrentMetastoreArgs(), options.WithDefaults()); + + /// + /// Retrieves information about metastore attached to a given workspace. + /// + /// > **Note** This is the workspace-level data source. + /// + /// > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + /// + /// {{% examples %}} + /// ## Example Usage + /// {{% example %}} + /// + /// MetastoreSummary response for a metastore attached to the current workspace. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @this = Databricks.GetCurrentMetastore.Invoke(); + /// + /// return new Dictionary<string, object?> + /// { + /// ["someMetastore"] = data.Databricks_metastore.This.Metastore_info[0], + /// }; + /// }); + /// ``` + /// {{% /example %}} + /// {{% /examples %}} + /// ## Related Resources + /// + /// The following resources are used in the same context: + /// + /// * databricks.Metastore to get information for a metastore with a given ID. + /// * databricks.getMetastores to get a mapping of name to id of all metastores. + /// * databricks.Metastore to manage Metastores within Unity Catalog. + /// * databricks.Catalog to manage catalogs within Unity Catalog. + /// + public static Output Invoke(GetCurrentMetastoreInvokeArgs? args = null, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", args ?? new GetCurrentMetastoreInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetCurrentMetastoreArgs : global::Pulumi.InvokeArgs + { + /// + /// metastore ID. + /// + [Input("id")] + public string? 
Id { get; set; } + + /// + /// summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + /// + [Input("metastoreInfo")] + public Inputs.GetCurrentMetastoreMetastoreInfoArgs? MetastoreInfo { get; set; } + + public GetCurrentMetastoreArgs() + { + } + public static new GetCurrentMetastoreArgs Empty => new GetCurrentMetastoreArgs(); + } + + public sealed class GetCurrentMetastoreInvokeArgs : global::Pulumi.InvokeArgs + { + /// + /// metastore ID. + /// + [Input("id")] + public Input? Id { get; set; } + + /// + /// summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + /// + [Input("metastoreInfo")] + public Input? MetastoreInfo { get; set; } + + public GetCurrentMetastoreInvokeArgs() + { + } + public static new GetCurrentMetastoreInvokeArgs Empty => new GetCurrentMetastoreInvokeArgs(); + } + + + [OutputType] + public sealed class GetCurrentMetastoreResult + { + /// + /// metastore ID. + /// + public readonly string Id; + /// + /// summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + /// + public readonly Outputs.GetCurrentMetastoreMetastoreInfoResult MetastoreInfo; + + [OutputConstructor] + private GetCurrentMetastoreResult( + string id, + + Outputs.GetCurrentMetastoreMetastoreInfoResult metastoreInfo) + { + Id = id; + MetastoreInfo = metastoreInfo; + } + } +} diff --git a/sdk/dotnet/GetDirectory.cs b/sdk/dotnet/GetDirectory.cs index e53aabd2..6ca63693 100644 --- a/sdk/dotnet/GetDirectory.cs +++ b/sdk/dotnet/GetDirectory.cs @@ -126,6 +126,10 @@ public sealed class GetDirectoryResult /// public readonly int ObjectId; public readonly string Path; + /// + /// path on Workspace File System (WSFS) in form of `/Workspace` + `path` + /// + public readonly string WorkspacePath; [OutputConstructor] private GetDirectoryResult( @@ -133,11 +137,14 @@ private GetDirectoryResult( int objectId, - string path) + string path, + + string workspacePath) { Id = id; ObjectId = objectId; Path = path; + WorkspacePath = workspacePath; } } } diff --git a/sdk/dotnet/GetServicePrincipal.cs b/sdk/dotnet/GetServicePrincipal.cs index bd58206d..ad2c61cf 100644 --- a/sdk/dotnet/GetServicePrincipal.cs +++ b/sdk/dotnet/GetServicePrincipal.cs @@ -144,7 +144,7 @@ public sealed class GetServicePrincipalArgs : global::Pulumi.InvokeArgs public string? ApplicationId { get; set; } /// - /// Display name of the service principal, e.g. `Foo SPN`. + /// Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. /// [Input("displayName")] public string? DisplayName { get; set; } @@ -203,7 +203,7 @@ public sealed class GetServicePrincipalInvokeArgs : global::Pulumi.InvokeArgs public Input? ApplicationId { get; set; } /// - /// Display name of the service principal, e.g. `Foo SPN`. + /// Exact display name of the service principal. 
The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. /// [Input("displayName")] public Input? DisplayName { get; set; } diff --git a/sdk/dotnet/GetSqlWarehouse.cs b/sdk/dotnet/GetSqlWarehouse.cs index 71473e36..eb3e3a74 100644 --- a/sdk/dotnet/GetSqlWarehouse.cs +++ b/sdk/dotnet/GetSqlWarehouse.cs @@ -161,6 +161,12 @@ public sealed class GetSqlWarehouseArgs : global::Pulumi.InvokeArgs [Input("clusterSize")] public string? ClusterSize { get; set; } + /// + /// The username of the user who created the endpoint. + /// + [Input("creatorName")] + public string? CreatorName { get; set; } + /// /// ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. /// @@ -179,6 +185,12 @@ public sealed class GetSqlWarehouseArgs : global::Pulumi.InvokeArgs [Input("enableServerlessCompute")] public bool? EnableServerlessCompute { get; set; } + /// + /// Health status of the endpoint. + /// + [Input("health")] + public Inputs.GetSqlWarehouseHealthArgs? Health { get; set; } + /// /// The ID of the SQL warehouse. /// @@ -212,6 +224,15 @@ public sealed class GetSqlWarehouseArgs : global::Pulumi.InvokeArgs [Input("name")] public string? Name { get; set; } + /// + /// The current number of active sessions used by the endpoint. + /// + [Input("numActiveSessions")] + public int? NumActiveSessions { get; set; } + + /// + /// The current number of clusters used by the endpoint. + /// [Input("numClusters")] public int? NumClusters { get; set; } @@ -227,6 +248,9 @@ public sealed class GetSqlWarehouseArgs : global::Pulumi.InvokeArgs [Input("spotInstancePolicy")] public string? SpotInstancePolicy { get; set; } + /// + /// The current state of the endpoint. + /// [Input("state")] public string?
State { get; set; } @@ -236,6 +260,12 @@ public sealed class GetSqlWarehouseArgs : global::Pulumi.InvokeArgs [Input("tags")] public Inputs.GetSqlWarehouseTagsArgs? Tags { get; set; } + /// + /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + /// + [Input("warehouseType")] + public string? WarehouseType { get; set; } + public GetSqlWarehouseArgs() { } @@ -262,6 +292,12 @@ public sealed class GetSqlWarehouseInvokeArgs : global::Pulumi.InvokeArgs [Input("clusterSize")] public Input? ClusterSize { get; set; } + /// + /// The username of the user who created the endpoint. + /// + [Input("creatorName")] + public Input? CreatorName { get; set; } + /// /// ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. /// @@ -280,6 +316,12 @@ public sealed class GetSqlWarehouseInvokeArgs : global::Pulumi.InvokeArgs [Input("enableServerlessCompute")] public Input? EnableServerlessCompute { get; set; } + /// + /// Health status of the endpoint. + /// + [Input("health")] + public Input? Health { get; set; } + /// /// The ID of the SQL warehouse. /// @@ -313,6 +355,15 @@ public sealed class GetSqlWarehouseInvokeArgs : global::Pulumi.InvokeArgs [Input("name")] public Input? Name { get; set; } + /// + /// The current number of active sessions used by the endpoint. + /// + [Input("numActiveSessions")] + public Input? NumActiveSessions { get; set; } + + /// + /// The current number of clusters used by the endpoint. + /// [Input("numClusters")] public Input? NumClusters { get; set; } @@ -328,6 +379,9 @@ public sealed class GetSqlWarehouseInvokeArgs : global::Pulumi.InvokeArgs [Input("spotInstancePolicy")] public Input? SpotInstancePolicy { get; set; } + /// + /// The current state of the endpoint. + /// [Input("state")] public Input?
State { get; set; } @@ -337,6 +391,12 @@ public sealed class GetSqlWarehouseInvokeArgs : global::Pulumi.InvokeArgs [Input("tags")] public Input? Tags { get; set; } + /// + /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + /// + [Input("warehouseType")] + public Input? WarehouseType { get; set; } + public GetSqlWarehouseInvokeArgs() { } @@ -360,6 +420,10 @@ public sealed class GetSqlWarehouseResult /// public readonly string ClusterSize; /// + /// The username of the user who created the endpoint. + /// + public readonly string CreatorName; + /// /// ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. /// public readonly string DataSourceId; @@ -372,6 +436,10 @@ public sealed class GetSqlWarehouseResult /// public readonly bool EnableServerlessCompute; /// + /// Health status of the endpoint. + /// + public readonly Outputs.GetSqlWarehouseHealthResult Health; + /// /// The ID of the SQL warehouse. /// public readonly string Id; @@ -392,6 +460,13 @@ public sealed class GetSqlWarehouseResult /// Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. /// public readonly string Name; + /// + /// The current number of active sessions used by the endpoint. + /// + public readonly int NumActiveSessions; + /// + /// The current number of clusters used by the endpoint. + /// public readonly int NumClusters; /// /// ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -401,11 +476,18 @@ public sealed class GetSqlWarehouseResult /// The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. /// public readonly string SpotInstancePolicy; + /// + /// The current state of the endpoint.
+ /// public readonly string State; /// /// tags used for SQL warehouse resources. /// public readonly Outputs.GetSqlWarehouseTagsResult Tags; + /// + /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + /// + public readonly string WarehouseType; [OutputConstructor] private GetSqlWarehouseResult( @@ -415,12 +497,16 @@ private GetSqlWarehouseResult( string clusterSize, + string creatorName, + string dataSourceId, bool enablePhoton, bool enableServerlessCompute, + Outputs.GetSqlWarehouseHealthResult health, + string id, string instanceProfileArn, @@ -433,6 +519,8 @@ private GetSqlWarehouseResult( string name, + int numActiveSessions, + int numClusters, Outputs.GetSqlWarehouseOdbcParamsResult odbcParams, @@ -441,25 +529,31 @@ private GetSqlWarehouseResult( string state, - Outputs.GetSqlWarehouseTagsResult tags) + Outputs.GetSqlWarehouseTagsResult tags, + + string warehouseType) { AutoStopMins = autoStopMins; Channel = channel; ClusterSize = clusterSize; + CreatorName = creatorName; DataSourceId = dataSourceId; EnablePhoton = enablePhoton; EnableServerlessCompute = enableServerlessCompute; + Health = health; Id = id; InstanceProfileArn = instanceProfileArn; JdbcUrl = jdbcUrl; MaxNumClusters = maxNumClusters; MinNumClusters = minNumClusters; Name = name; + NumActiveSessions = numActiveSessions; NumClusters = numClusters; OdbcParams = odbcParams; SpotInstancePolicy = spotInstancePolicy; State = state; Tags = tags; + WarehouseType = warehouseType; } } } diff --git a/sdk/dotnet/Grant.cs b/sdk/dotnet/Grant.cs new file mode 100644 index 00000000..9a756cc0 --- /dev/null +++ b/sdk/dotnet/Grant.cs @@ -0,0 +1,219 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks +{ + [DatabricksResourceType("databricks:index/grant:Grant")] + public partial class Grant : global::Pulumi.CustomResource + { + [Output("catalog")] + public Output Catalog { get; private set; } = null!; + + [Output("externalLocation")] + public Output ExternalLocation { get; private set; } = null!; + + [Output("foreignConnection")] + public Output ForeignConnection { get; private set; } = null!; + + [Output("function")] + public Output Function { get; private set; } = null!; + + [Output("metastore")] + public Output Metastore { get; private set; } = null!; + + [Output("model")] + public Output Model { get; private set; } = null!; + + [Output("pipeline")] + public Output Pipeline { get; private set; } = null!; + + [Output("principal")] + public Output Principal { get; private set; } = null!; + + [Output("privileges")] + public Output> Privileges { get; private set; } = null!; + + [Output("recipient")] + public Output Recipient { get; private set; } = null!; + + [Output("schema")] + public Output Schema { get; private set; } = null!; + + [Output("share")] + public Output Share { get; private set; } = null!; + + [Output("storageCredential")] + public Output StorageCredential { get; private set; } = null!; + + [Output("table")] + public Output Table { get; private set; } = null!; + + [Output("volume")] + public Output Volume { get; private set; } = null!; + + + /// + /// Create a Grant resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public Grant(string name, GrantArgs args, CustomResourceOptions? options = null) + : base("databricks:index/grant:Grant", name, args ?? 
new GrantArgs(), MakeResourceOptions(options, "")) + { + } + + private Grant(string name, Input id, GrantState? state = null, CustomResourceOptions? options = null) + : base("databricks:index/grant:Grant", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing Grant resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static Grant Get(string name, Input id, GrantState? state = null, CustomResourceOptions? options = null) + { + return new Grant(name, id, state, options); + } + } + + public sealed class GrantArgs : global::Pulumi.ResourceArgs + { + [Input("catalog")] + public Input? Catalog { get; set; } + + [Input("externalLocation")] + public Input? ExternalLocation { get; set; } + + [Input("foreignConnection")] + public Input? ForeignConnection { get; set; } + + [Input("function")] + public Input? Function { get; set; } + + [Input("metastore")] + public Input? Metastore { get; set; } + + [Input("model")] + public Input? Model { get; set; } + + [Input("pipeline")] + public Input? Pipeline { get; set; } + + [Input("principal", required: true)] + public Input Principal { get; set; } = null!; + + [Input("privileges", required: true)] + private InputList? _privileges; + public InputList Privileges + { + get => _privileges ?? 
(_privileges = new InputList()); + set => _privileges = value; + } + + [Input("recipient")] + public Input? Recipient { get; set; } + + [Input("schema")] + public Input? Schema { get; set; } + + [Input("share")] + public Input? Share { get; set; } + + [Input("storageCredential")] + public Input? StorageCredential { get; set; } + + [Input("table")] + public Input? Table { get; set; } + + [Input("volume")] + public Input? Volume { get; set; } + + public GrantArgs() + { + } + public static new GrantArgs Empty => new GrantArgs(); + } + + public sealed class GrantState : global::Pulumi.ResourceArgs + { + [Input("catalog")] + public Input? Catalog { get; set; } + + [Input("externalLocation")] + public Input? ExternalLocation { get; set; } + + [Input("foreignConnection")] + public Input? ForeignConnection { get; set; } + + [Input("function")] + public Input? Function { get; set; } + + [Input("metastore")] + public Input? Metastore { get; set; } + + [Input("model")] + public Input? Model { get; set; } + + [Input("pipeline")] + public Input? Pipeline { get; set; } + + [Input("principal")] + public Input? Principal { get; set; } + + [Input("privileges")] + private InputList? _privileges; + public InputList Privileges + { + get => _privileges ?? (_privileges = new InputList()); + set => _privileges = value; + } + + [Input("recipient")] + public Input? Recipient { get; set; } + + [Input("schema")] + public Input? Schema { get; set; } + + [Input("share")] + public Input? Share { get; set; } + + [Input("storageCredential")] + public Input? StorageCredential { get; set; } + + [Input("table")] + public Input? Table { get; set; } + + [Input("volume")] + public Input? 
Volume { get; set; } + + public GrantState() + { + } + public static new GrantState Empty => new GrantState(); + } +} diff --git a/sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfo.cs b/sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfo.cs new file mode 100644 index 00000000..94838fee --- /dev/null +++ b/sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfo.cs @@ -0,0 +1,125 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetCurrentMetastoreMetastoreInfoArgs : global::Pulumi.InvokeArgs + { + [Input("cloud")] + public string? Cloud { get; set; } + + /// + /// Timestamp (in milliseconds) when the current metastore was created. + /// + [Input("createdAt")] + public int? CreatedAt { get; set; } + + /// + /// the ID of the identity that created the current metastore. + /// + [Input("createdBy")] + public string? CreatedBy { get; set; } + + /// + /// the ID of the default data access configuration. + /// + [Input("defaultDataAccessConfigId")] + public string? DefaultDataAccessConfigId { get; set; } + + /// + /// The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + /// + [Input("deltaSharingOrganizationName")] + public string? DeltaSharingOrganizationName { get; set; } + + /// + /// the expiration duration in seconds on recipient data access tokens. + /// + [Input("deltaSharingRecipientTokenLifetimeInSeconds")] + public int? DeltaSharingRecipientTokenLifetimeInSeconds { get; set; } + + /// + /// Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + /// + [Input("deltaSharingScope")] + public string? 
DeltaSharingScope { get; set; } + + /// + /// Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + /// + [Input("globalMetastoreId")] + public string? GlobalMetastoreId { get; set; } + + /// + /// Metastore ID. + /// + [Input("metastoreId")] + public string? MetastoreId { get; set; } + + /// + /// Name of metastore. + /// + [Input("name")] + public string? Name { get; set; } + + /// + /// Username/group name/sp application_id of the metastore owner. + /// + [Input("owner")] + public string? Owner { get; set; } + + /// + /// the version of the privilege model used by the metastore. + /// + [Input("privilegeModelVersion")] + public string? PrivilegeModelVersion { get; set; } + + /// + /// (Mandatory for account-level) The region of the metastore. + /// + [Input("region")] + public string? Region { get; set; } + + /// + /// Path on cloud storage account, where managed `databricks.Table` are stored. + /// + [Input("storageRoot")] + public string? StorageRoot { get; set; } + + /// + /// ID of a storage credential used for the `storage_root`. + /// + [Input("storageRootCredentialId")] + public string? StorageRootCredentialId { get; set; } + + /// + /// Name of a storage credential used for the `storage_root`. + /// + [Input("storageRootCredentialName")] + public string? StorageRootCredentialName { get; set; } + + /// + /// Timestamp (in milliseconds) when the current metastore was updated. + /// + [Input("updatedAt")] + public int? UpdatedAt { get; set; } + + /// + /// the ID of the identity that updated the current metastore. + /// + [Input("updatedBy")] + public string? 
UpdatedBy { get; set; } + + public GetCurrentMetastoreMetastoreInfoArgs() + { + } + public static new GetCurrentMetastoreMetastoreInfoArgs Empty => new GetCurrentMetastoreMetastoreInfoArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfoArgs.cs b/sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfoArgs.cs new file mode 100644 index 00000000..dbc21584 --- /dev/null +++ b/sdk/dotnet/Inputs/GetCurrentMetastoreMetastoreInfoArgs.cs @@ -0,0 +1,125 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetCurrentMetastoreMetastoreInfoInputArgs : global::Pulumi.ResourceArgs + { + [Input("cloud")] + public Input? Cloud { get; set; } + + /// + /// Timestamp (in milliseconds) when the current metastore was created. + /// + [Input("createdAt")] + public Input? CreatedAt { get; set; } + + /// + /// the ID of the identity that created the current metastore. + /// + [Input("createdBy")] + public Input? CreatedBy { get; set; } + + /// + /// the ID of the default data access configuration. + /// + [Input("defaultDataAccessConfigId")] + public Input? DefaultDataAccessConfigId { get; set; } + + /// + /// The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + /// + [Input("deltaSharingOrganizationName")] + public Input? DeltaSharingOrganizationName { get; set; } + + /// + /// the expiration duration in seconds on recipient data access tokens. + /// + [Input("deltaSharingRecipientTokenLifetimeInSeconds")] + public Input? DeltaSharingRecipientTokenLifetimeInSeconds { get; set; } + + /// + /// Used to enable delta sharing on the metastore. 
Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + /// + [Input("deltaSharingScope")] + public Input? DeltaSharingScope { get; set; } + + /// + /// Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + /// + [Input("globalMetastoreId")] + public Input? GlobalMetastoreId { get; set; } + + /// + /// Metastore ID. + /// + [Input("metastoreId")] + public Input? MetastoreId { get; set; } + + /// + /// Name of metastore. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// Username/group name/sp application_id of the metastore owner. + /// + [Input("owner")] + public Input? Owner { get; set; } + + /// + /// the version of the privilege model used by the metastore. + /// + [Input("privilegeModelVersion")] + public Input? PrivilegeModelVersion { get; set; } + + /// + /// (Mandatory for account-level) The region of the metastore. + /// + [Input("region")] + public Input? Region { get; set; } + + /// + /// Path on cloud storage account, where managed `databricks.Table` are stored. + /// + [Input("storageRoot")] + public Input? StorageRoot { get; set; } + + /// + /// ID of a storage credential used for the `storage_root`. + /// + [Input("storageRootCredentialId")] + public Input? StorageRootCredentialId { get; set; } + + /// + /// Name of a storage credential used for the `storage_root`. + /// + [Input("storageRootCredentialName")] + public Input? StorageRootCredentialName { get; set; } + + /// + /// Timestamp (in milliseconds) when the current metastore was updated. + /// + [Input("updatedAt")] + public Input? UpdatedAt { get; set; } + + /// + /// the ID of the identity that updated the current metastore. + /// + [Input("updatedBy")] + public Input? 
UpdatedBy { get; set; } + + public GetCurrentMetastoreMetastoreInfoInputArgs() + { + } + public static new GetCurrentMetastoreMetastoreInfoInputArgs Empty => new GetCurrentMetastoreMetastoreInfoInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetMetastoreMetastoreInfo.cs b/sdk/dotnet/Inputs/GetMetastoreMetastoreInfo.cs index 18363f48..a4e99f4a 100644 --- a/sdk/dotnet/Inputs/GetMetastoreMetastoreInfo.cs +++ b/sdk/dotnet/Inputs/GetMetastoreMetastoreInfo.cs @@ -70,7 +70,7 @@ public sealed class GetMetastoreMetastoreInfoArgs : global::Pulumi.InvokeArgs public string? Region { get; set; } /// - /// Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + /// Path on cloud storage account, where managed `databricks.Table` are stored. /// [Input("storageRoot")] public string? StorageRoot { get; set; } diff --git a/sdk/dotnet/Inputs/GetMetastoreMetastoreInfoArgs.cs b/sdk/dotnet/Inputs/GetMetastoreMetastoreInfoArgs.cs index f3bb81cc..2da304ae 100644 --- a/sdk/dotnet/Inputs/GetMetastoreMetastoreInfoArgs.cs +++ b/sdk/dotnet/Inputs/GetMetastoreMetastoreInfoArgs.cs @@ -70,7 +70,7 @@ public sealed class GetMetastoreMetastoreInfoInputArgs : global::Pulumi.Resource public Input? Region { get; set; } /// - /// Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + /// Path on cloud storage account, where managed `databricks.Table` are stored. /// [Input("storageRoot")] public Input? StorageRoot { get; set; } diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseChannel.cs b/sdk/dotnet/Inputs/GetSqlWarehouseChannel.cs index afc16e48..948e13a8 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseChannel.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseChannel.cs @@ -12,6 +12,9 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseChannelArgs : global::Pulumi.InvokeArgs { + [Input("dbsqlVersion")] + public string? 
DbsqlVersion { get; set; } + /// /// Name of the SQL warehouse to search (case-sensitive). /// diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseChannelArgs.cs b/sdk/dotnet/Inputs/GetSqlWarehouseChannelArgs.cs index 8b1823e1..468e57d2 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseChannelArgs.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseChannelArgs.cs @@ -12,6 +12,9 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseChannelInputArgs : global::Pulumi.ResourceArgs { + [Input("dbsqlVersion")] + public Input? DbsqlVersion { get; set; } + /// /// Name of the SQL warehouse to search (case-sensitive). /// diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseHealth.cs b/sdk/dotnet/Inputs/GetSqlWarehouseHealth.cs new file mode 100644 index 00000000..45abb260 --- /dev/null +++ b/sdk/dotnet/Inputs/GetSqlWarehouseHealth.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetSqlWarehouseHealthArgs : global::Pulumi.InvokeArgs + { + [Input("details")] + public string? Details { get; set; } + + [Input("failureReason")] + public Inputs.GetSqlWarehouseHealthFailureReasonArgs? FailureReason { get; set; } + + [Input("message")] + public string? Message { get; set; } + + [Input("status")] + public string? Status { get; set; } + + [Input("summary")] + public string? 
Summary { get; set; } + + public GetSqlWarehouseHealthArgs() + { + } + public static new GetSqlWarehouseHealthArgs Empty => new GetSqlWarehouseHealthArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseHealthArgs.cs b/sdk/dotnet/Inputs/GetSqlWarehouseHealthArgs.cs new file mode 100644 index 00000000..aea4794a --- /dev/null +++ b/sdk/dotnet/Inputs/GetSqlWarehouseHealthArgs.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetSqlWarehouseHealthInputArgs : global::Pulumi.ResourceArgs + { + [Input("details")] + public Input? Details { get; set; } + + [Input("failureReason")] + public Input? FailureReason { get; set; } + + [Input("message")] + public Input? Message { get; set; } + + [Input("status")] + public Input? Status { get; set; } + + [Input("summary")] + public Input? Summary { get; set; } + + public GetSqlWarehouseHealthInputArgs() + { + } + public static new GetSqlWarehouseHealthInputArgs Empty => new GetSqlWarehouseHealthInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReason.cs b/sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReason.cs new file mode 100644 index 00000000..3081a793 --- /dev/null +++ b/sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReason.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetSqlWarehouseHealthFailureReasonArgs : global::Pulumi.InvokeArgs + { + [Input("code")] + public string? Code { get; set; } + + [Input("parameters")] + private Dictionary? _parameters; + public Dictionary Parameters + { + get => _parameters ?? (_parameters = new Dictionary()); + set => _parameters = value; + } + + [Input("type")] + public string? Type { get; set; } + + public GetSqlWarehouseHealthFailureReasonArgs() + { + } + public static new GetSqlWarehouseHealthFailureReasonArgs Empty => new GetSqlWarehouseHealthFailureReasonArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReasonArgs.cs b/sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReasonArgs.cs new file mode 100644 index 00000000..a9d411c6 --- /dev/null +++ b/sdk/dotnet/Inputs/GetSqlWarehouseHealthFailureReasonArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetSqlWarehouseHealthFailureReasonInputArgs : global::Pulumi.ResourceArgs + { + [Input("code")] + public Input? Code { get; set; } + + [Input("parameters")] + private InputMap? _parameters; + public InputMap Parameters + { + get => _parameters ?? (_parameters = new InputMap()); + set => _parameters = value; + } + + [Input("type")] + public Input? 
Type { get; set; } + + public GetSqlWarehouseHealthFailureReasonInputArgs() + { + } + public static new GetSqlWarehouseHealthFailureReasonInputArgs Empty => new GetSqlWarehouseHealthFailureReasonInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParams.cs b/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParams.cs index 2ecfc170..67458609 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParams.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParams.cs @@ -12,20 +12,17 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseOdbcParamsArgs : global::Pulumi.InvokeArgs { - [Input("host")] - public string? Host { get; set; } - [Input("hostname")] public string? Hostname { get; set; } - [Input("path", required: true)] - public string Path { get; set; } = null!; + [Input("path")] + public string? Path { get; set; } - [Input("port", required: true)] - public int Port { get; set; } + [Input("port")] + public int? Port { get; set; } - [Input("protocol", required: true)] - public string Protocol { get; set; } = null!; + [Input("protocol")] + public string? Protocol { get; set; } public GetSqlWarehouseOdbcParamsArgs() { diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParamsArgs.cs b/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParamsArgs.cs index e1d7cbda..6161ec55 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParamsArgs.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseOdbcParamsArgs.cs @@ -12,20 +12,17 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseOdbcParamsInputArgs : global::Pulumi.ResourceArgs { - [Input("host")] - public Input? Host { get; set; } - [Input("hostname")] public Input? Hostname { get; set; } - [Input("path", required: true)] - public Input Path { get; set; } = null!; + [Input("path")] + public Input? Path { get; set; } - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? 
Port { get; set; } - [Input("protocol", required: true)] - public Input Protocol { get; set; } = null!; + [Input("protocol")] + public Input? Protocol { get; set; } public GetSqlWarehouseOdbcParamsInputArgs() { diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseTags.cs b/sdk/dotnet/Inputs/GetSqlWarehouseTags.cs index 6645ce64..f4ad22da 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseTags.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseTags.cs @@ -12,7 +12,7 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseTagsArgs : global::Pulumi.InvokeArgs { - [Input("customTags", required: true)] + [Input("customTags")] private List? _customTags; public List CustomTags { diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseTagsArgs.cs b/sdk/dotnet/Inputs/GetSqlWarehouseTagsArgs.cs index 747af9c2..a02d1cde 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseTagsArgs.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseTagsArgs.cs @@ -12,7 +12,7 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseTagsInputArgs : global::Pulumi.ResourceArgs { - [Input("customTags", required: true)] + [Input("customTags")] private InputList? _customTags; public InputList CustomTags { diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTag.cs b/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTag.cs index 22d44390..af2ae506 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTag.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTag.cs @@ -12,11 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseTagsCustomTagArgs : global::Pulumi.InvokeArgs { - [Input("key", required: true)] - public string Key { get; set; } = null!; + [Input("key")] + public string? Key { get; set; } - [Input("value", required: true)] - public string Value { get; set; } = null!; + [Input("value")] + public string? 
Value { get; set; } public GetSqlWarehouseTagsCustomTagArgs() { diff --git a/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTagArgs.cs b/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTagArgs.cs index 0ffc099f..760c3137 100644 --- a/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTagArgs.cs +++ b/sdk/dotnet/Inputs/GetSqlWarehouseTagsCustomTagArgs.cs @@ -12,11 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class GetSqlWarehouseTagsCustomTagInputArgs : global::Pulumi.ResourceArgs { - [Input("key", required: true)] - public Input Key { get; set; } = null!; + [Input("key")] + public Input? Key { get; set; } - [Input("value", required: true)] - public Input Value { get; set; } = null!; + [Input("value")] + public Input? Value { get; set; } public GetSqlWarehouseTagsCustomTagInputArgs() { diff --git a/sdk/dotnet/Inputs/SqlEndpointChannelArgs.cs b/sdk/dotnet/Inputs/SqlEndpointChannelArgs.cs index 24cb0dea..274b7fe7 100644 --- a/sdk/dotnet/Inputs/SqlEndpointChannelArgs.cs +++ b/sdk/dotnet/Inputs/SqlEndpointChannelArgs.cs @@ -12,6 +12,9 @@ namespace Pulumi.Databricks.Inputs public sealed class SqlEndpointChannelArgs : global::Pulumi.ResourceArgs { + [Input("dbsqlVersion")] + public Input? DbsqlVersion { get; set; } + /// /// Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. /// diff --git a/sdk/dotnet/Inputs/SqlEndpointChannelGetArgs.cs b/sdk/dotnet/Inputs/SqlEndpointChannelGetArgs.cs index 62cc1357..bf408d0a 100644 --- a/sdk/dotnet/Inputs/SqlEndpointChannelGetArgs.cs +++ b/sdk/dotnet/Inputs/SqlEndpointChannelGetArgs.cs @@ -12,6 +12,9 @@ namespace Pulumi.Databricks.Inputs public sealed class SqlEndpointChannelGetArgs : global::Pulumi.ResourceArgs { + [Input("dbsqlVersion")] + public Input? DbsqlVersion { get; set; } + /// /// Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. 
/// diff --git a/sdk/dotnet/Inputs/SqlEndpointHealthArgs.cs b/sdk/dotnet/Inputs/SqlEndpointHealthArgs.cs new file mode 100644 index 00000000..00bcba65 --- /dev/null +++ b/sdk/dotnet/Inputs/SqlEndpointHealthArgs.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class SqlEndpointHealthArgs : global::Pulumi.ResourceArgs + { + [Input("details")] + public Input? Details { get; set; } + + [Input("failureReason")] + public Input? FailureReason { get; set; } + + [Input("message")] + public Input? Message { get; set; } + + [Input("status")] + public Input? Status { get; set; } + + [Input("summary")] + public Input? Summary { get; set; } + + public SqlEndpointHealthArgs() + { + } + public static new SqlEndpointHealthArgs Empty => new SqlEndpointHealthArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonArgs.cs b/sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonArgs.cs new file mode 100644 index 00000000..6b780986 --- /dev/null +++ b/sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class SqlEndpointHealthFailureReasonArgs : global::Pulumi.ResourceArgs + { + [Input("code")] + public Input? Code { get; set; } + + [Input("parameters")] + private InputMap? 
_parameters; + public InputMap Parameters + { + get => _parameters ?? (_parameters = new InputMap()); + set => _parameters = value; + } + + [Input("type")] + public Input? Type { get; set; } + + public SqlEndpointHealthFailureReasonArgs() + { + } + public static new SqlEndpointHealthFailureReasonArgs Empty => new SqlEndpointHealthFailureReasonArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonGetArgs.cs b/sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonGetArgs.cs new file mode 100644 index 00000000..5506a4ad --- /dev/null +++ b/sdk/dotnet/Inputs/SqlEndpointHealthFailureReasonGetArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class SqlEndpointHealthFailureReasonGetArgs : global::Pulumi.ResourceArgs + { + [Input("code")] + public Input? Code { get; set; } + + [Input("parameters")] + private InputMap? _parameters; + public InputMap Parameters + { + get => _parameters ?? (_parameters = new InputMap()); + set => _parameters = value; + } + + [Input("type")] + public Input? Type { get; set; } + + public SqlEndpointHealthFailureReasonGetArgs() + { + } + public static new SqlEndpointHealthFailureReasonGetArgs Empty => new SqlEndpointHealthFailureReasonGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SqlEndpointHealthGetArgs.cs b/sdk/dotnet/Inputs/SqlEndpointHealthGetArgs.cs new file mode 100644 index 00000000..097d682b --- /dev/null +++ b/sdk/dotnet/Inputs/SqlEndpointHealthGetArgs.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class SqlEndpointHealthGetArgs : global::Pulumi.ResourceArgs + { + [Input("details")] + public Input? Details { get; set; } + + [Input("failureReason")] + public Input? FailureReason { get; set; } + + [Input("message")] + public Input? Message { get; set; } + + [Input("status")] + public Input? Status { get; set; } + + [Input("summary")] + public Input? Summary { get; set; } + + public SqlEndpointHealthGetArgs() + { + } + public static new SqlEndpointHealthGetArgs Empty => new SqlEndpointHealthGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SqlEndpointOdbcParamsArgs.cs b/sdk/dotnet/Inputs/SqlEndpointOdbcParamsArgs.cs index 3e19d324..186c0660 100644 --- a/sdk/dotnet/Inputs/SqlEndpointOdbcParamsArgs.cs +++ b/sdk/dotnet/Inputs/SqlEndpointOdbcParamsArgs.cs @@ -12,20 +12,17 @@ namespace Pulumi.Databricks.Inputs public sealed class SqlEndpointOdbcParamsArgs : global::Pulumi.ResourceArgs { - [Input("host")] - public Input? Host { get; set; } - [Input("hostname")] public Input? Hostname { get; set; } - [Input("path", required: true)] - public Input Path { get; set; } = null!; + [Input("path")] + public Input? Path { get; set; } - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? Port { get; set; } - [Input("protocol", required: true)] - public Input Protocol { get; set; } = null!; + [Input("protocol")] + public Input? 
Protocol { get; set; } public SqlEndpointOdbcParamsArgs() { diff --git a/sdk/dotnet/Inputs/SqlEndpointOdbcParamsGetArgs.cs b/sdk/dotnet/Inputs/SqlEndpointOdbcParamsGetArgs.cs index c78c7ef6..7205070d 100644 --- a/sdk/dotnet/Inputs/SqlEndpointOdbcParamsGetArgs.cs +++ b/sdk/dotnet/Inputs/SqlEndpointOdbcParamsGetArgs.cs @@ -12,20 +12,17 @@ namespace Pulumi.Databricks.Inputs public sealed class SqlEndpointOdbcParamsGetArgs : global::Pulumi.ResourceArgs { - [Input("host")] - public Input? Host { get; set; } - [Input("hostname")] public Input? Hostname { get; set; } - [Input("path", required: true)] - public Input Path { get; set; } = null!; + [Input("path")] + public Input? Path { get; set; } - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? Port { get; set; } - [Input("protocol", required: true)] - public Input Protocol { get; set; } = null!; + [Input("protocol")] + public Input? Protocol { get; set; } public SqlEndpointOdbcParamsGetArgs() { diff --git a/sdk/dotnet/Inputs/SqlEndpointTagsArgs.cs b/sdk/dotnet/Inputs/SqlEndpointTagsArgs.cs index d378da7f..b083f1db 100644 --- a/sdk/dotnet/Inputs/SqlEndpointTagsArgs.cs +++ b/sdk/dotnet/Inputs/SqlEndpointTagsArgs.cs @@ -12,7 +12,7 @@ namespace Pulumi.Databricks.Inputs public sealed class SqlEndpointTagsArgs : global::Pulumi.ResourceArgs { - [Input("customTags", required: true)] + [Input("customTags")] private InputList? _customTags; public InputList CustomTags { diff --git a/sdk/dotnet/Inputs/SqlEndpointTagsGetArgs.cs b/sdk/dotnet/Inputs/SqlEndpointTagsGetArgs.cs index fb2fafa1..94f96d76 100644 --- a/sdk/dotnet/Inputs/SqlEndpointTagsGetArgs.cs +++ b/sdk/dotnet/Inputs/SqlEndpointTagsGetArgs.cs @@ -12,7 +12,7 @@ namespace Pulumi.Databricks.Inputs public sealed class SqlEndpointTagsGetArgs : global::Pulumi.ResourceArgs { - [Input("customTags", required: true)] + [Input("customTags")] private InputList? 
_customTags; public InputList CustomTags { diff --git a/sdk/dotnet/Metastore.cs b/sdk/dotnet/Metastore.cs index 6a3aceff..e6417458 100644 --- a/sdk/dotnet/Metastore.cs +++ b/sdk/dotnet/Metastore.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be used with account or workspace-level provider. + /// /// A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. /// /// Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). diff --git a/sdk/dotnet/MetastoreAssignment.cs b/sdk/dotnet/MetastoreAssignment.cs index e5b1ada7..9489083b 100644 --- a/sdk/dotnet/MetastoreAssignment.cs +++ b/sdk/dotnet/MetastoreAssignment.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with account-level provider! + /// /// A single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. /// /// ## Example Usage diff --git a/sdk/dotnet/MetastoreDataAccess.cs b/sdk/dotnet/MetastoreDataAccess.cs index 37a9c80f..647d0074 100644 --- a/sdk/dotnet/MetastoreDataAccess.cs +++ b/sdk/dotnet/MetastoreDataAccess.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be used with account or workspace-level provider. + /// /// Optionally, each databricks.Metastore can have a default databricks.StorageCredential defined as `databricks.MetastoreDataAccess`. 
This will be used by Unity Catalog to access data in the root storage location if defined. /// /// ## Import @@ -65,6 +67,9 @@ public partial class MetastoreDataAccess : global::Pulumi.CustomResource [Output("readOnly")] public Output ReadOnly { get; private set; } = null!; + [Output("skipValidation")] + public Output SkipValidation { get; private set; } = null!; + /// /// Create a MetastoreDataAccess resource with the given unique name, arguments, and options. @@ -153,6 +158,9 @@ public sealed class MetastoreDataAccessArgs : global::Pulumi.ResourceArgs [Input("readOnly")] public Input? ReadOnly { get; set; } + [Input("skipValidation")] + public Input? SkipValidation { get; set; } + public MetastoreDataAccessArgs() { } @@ -203,6 +211,9 @@ public sealed class MetastoreDataAccessState : global::Pulumi.ResourceArgs [Input("readOnly")] public Input? ReadOnly { get; set; } + [Input("skipValidation")] + public Input? SkipValidation { get; set; } + public MetastoreDataAccessState() { } diff --git a/sdk/dotnet/MetastoreProvider.cs b/sdk/dotnet/MetastoreProvider.cs index a3373e20..a69f5906 100644 --- a/sdk/dotnet/MetastoreProvider.cs +++ b/sdk/dotnet/MetastoreProvider.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. /// /// A `databricks.MetastoreProvider` is contained within databricks.Metastore and can contain a list of shares that have been shared with you. diff --git a/sdk/dotnet/Outputs/GetCurrentMetastoreMetastoreInfoResult.cs b/sdk/dotnet/Outputs/GetCurrentMetastoreMetastoreInfoResult.cs new file mode 100644 index 00000000..c0481129 --- /dev/null +++ b/sdk/dotnet/Outputs/GetCurrentMetastoreMetastoreInfoResult.cs @@ -0,0 +1,144 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetCurrentMetastoreMetastoreInfoResult + { + public readonly string? Cloud; + /// + /// Timestamp (in milliseconds) when the current metastore was created. + /// + public readonly int? CreatedAt; + /// + /// the ID of the identity that created the current metastore. + /// + public readonly string? CreatedBy; + /// + /// the ID of the default data access configuration. + /// + public readonly string? DefaultDataAccessConfigId; + /// + /// The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + /// + public readonly string? DeltaSharingOrganizationName; + /// + /// the expiration duration in seconds on recipient data access tokens. + /// + public readonly int? DeltaSharingRecipientTokenLifetimeInSeconds; + /// + /// Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + /// + public readonly string? DeltaSharingScope; + /// + /// Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + /// + public readonly string? GlobalMetastoreId; + /// + /// Metastore ID. + /// + public readonly string? MetastoreId; + /// + /// Name of metastore. + /// + public readonly string? Name; + /// + /// Username/group name/sp application_id of the metastore owner. + /// + public readonly string? Owner; + /// + /// the version of the privilege model used by the metastore. + /// + public readonly string? PrivilegeModelVersion; + /// + /// (Mandatory for account-level) The region of the metastore. + /// + public readonly string? Region; + /// + /// Path on cloud storage account, where managed `databricks.Table` are stored. 
+ /// + public readonly string? StorageRoot; + /// + /// ID of a storage credential used for the `storage_root`. + /// + public readonly string? StorageRootCredentialId; + /// + /// Name of a storage credential used for the `storage_root`. + /// + public readonly string? StorageRootCredentialName; + /// + /// Timestamp (in milliseconds) when the current metastore was updated. + /// + public readonly int? UpdatedAt; + /// + /// the ID of the identity that updated the current metastore. + /// + public readonly string? UpdatedBy; + + [OutputConstructor] + private GetCurrentMetastoreMetastoreInfoResult( + string? cloud, + + int? createdAt, + + string? createdBy, + + string? defaultDataAccessConfigId, + + string? deltaSharingOrganizationName, + + int? deltaSharingRecipientTokenLifetimeInSeconds, + + string? deltaSharingScope, + + string? globalMetastoreId, + + string? metastoreId, + + string? name, + + string? owner, + + string? privilegeModelVersion, + + string? region, + + string? storageRoot, + + string? storageRootCredentialId, + + string? storageRootCredentialName, + + int? updatedAt, + + string? 
updatedBy) + { + Cloud = cloud; + CreatedAt = createdAt; + CreatedBy = createdBy; + DefaultDataAccessConfigId = defaultDataAccessConfigId; + DeltaSharingOrganizationName = deltaSharingOrganizationName; + DeltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds; + DeltaSharingScope = deltaSharingScope; + GlobalMetastoreId = globalMetastoreId; + MetastoreId = metastoreId; + Name = name; + Owner = owner; + PrivilegeModelVersion = privilegeModelVersion; + Region = region; + StorageRoot = storageRoot; + StorageRootCredentialId = storageRootCredentialId; + StorageRootCredentialName = storageRootCredentialName; + UpdatedAt = updatedAt; + UpdatedBy = updatedBy; + } + } +} diff --git a/sdk/dotnet/Outputs/GetMetastoreMetastoreInfoResult.cs b/sdk/dotnet/Outputs/GetMetastoreMetastoreInfoResult.cs index 6d064fde..8d6cbc87 100644 --- a/sdk/dotnet/Outputs/GetMetastoreMetastoreInfoResult.cs +++ b/sdk/dotnet/Outputs/GetMetastoreMetastoreInfoResult.cs @@ -45,7 +45,7 @@ public sealed class GetMetastoreMetastoreInfoResult public readonly string? PrivilegeModelVersion; public readonly string? Region; /// - /// Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + /// Path on cloud storage account, where managed `databricks.Table` are stored. /// public readonly string? StorageRoot; public readonly string? StorageRootCredentialId; diff --git a/sdk/dotnet/Outputs/GetSqlWarehouseChannelResult.cs b/sdk/dotnet/Outputs/GetSqlWarehouseChannelResult.cs index 76612505..57e88235 100644 --- a/sdk/dotnet/Outputs/GetSqlWarehouseChannelResult.cs +++ b/sdk/dotnet/Outputs/GetSqlWarehouseChannelResult.cs @@ -13,14 +13,19 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class GetSqlWarehouseChannelResult { + public readonly string? DbsqlVersion; /// /// Name of the SQL warehouse to search (case-sensitive). /// public readonly string? 
Name; [OutputConstructor] - private GetSqlWarehouseChannelResult(string? name) + private GetSqlWarehouseChannelResult( + string? dbsqlVersion, + + string? name) { + DbsqlVersion = dbsqlVersion; Name = name; } } diff --git a/sdk/dotnet/Outputs/GetSqlWarehouseHealthFailureReasonResult.cs b/sdk/dotnet/Outputs/GetSqlWarehouseHealthFailureReasonResult.cs new file mode 100644 index 00000000..e5810f86 --- /dev/null +++ b/sdk/dotnet/Outputs/GetSqlWarehouseHealthFailureReasonResult.cs @@ -0,0 +1,33 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetSqlWarehouseHealthFailureReasonResult + { + public readonly string? Code; + public readonly ImmutableDictionary? Parameters; + public readonly string? Type; + + [OutputConstructor] + private GetSqlWarehouseHealthFailureReasonResult( + string? code, + + ImmutableDictionary? parameters, + + string? type) + { + Code = code; + Parameters = parameters; + Type = type; + } + } +} diff --git a/sdk/dotnet/Outputs/GetSqlWarehouseHealthResult.cs b/sdk/dotnet/Outputs/GetSqlWarehouseHealthResult.cs new file mode 100644 index 00000000..26bc9d67 --- /dev/null +++ b/sdk/dotnet/Outputs/GetSqlWarehouseHealthResult.cs @@ -0,0 +1,41 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetSqlWarehouseHealthResult + { + public readonly string? 
Details; + public readonly Outputs.GetSqlWarehouseHealthFailureReasonResult? FailureReason; + public readonly string? Message; + public readonly string? Status; + public readonly string? Summary; + + [OutputConstructor] + private GetSqlWarehouseHealthResult( + string? details, + + Outputs.GetSqlWarehouseHealthFailureReasonResult? failureReason, + + string? message, + + string? status, + + string? summary) + { + Details = details; + FailureReason = failureReason; + Message = message; + Status = status; + Summary = summary; + } + } +} diff --git a/sdk/dotnet/Outputs/GetSqlWarehouseOdbcParamsResult.cs b/sdk/dotnet/Outputs/GetSqlWarehouseOdbcParamsResult.cs index 0c40a99a..a0d92059 100644 --- a/sdk/dotnet/Outputs/GetSqlWarehouseOdbcParamsResult.cs +++ b/sdk/dotnet/Outputs/GetSqlWarehouseOdbcParamsResult.cs @@ -13,25 +13,21 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class GetSqlWarehouseOdbcParamsResult { - public readonly string? Host; public readonly string? Hostname; - public readonly string Path; - public readonly int Port; - public readonly string Protocol; + public readonly string? Path; + public readonly int? Port; + public readonly string? Protocol; [OutputConstructor] private GetSqlWarehouseOdbcParamsResult( - string? host, - string? hostname, - string path, + string? path, - int port, + int? port, - string protocol) + string? protocol) { - Host = host; Hostname = hostname; Path = path; Port = port; diff --git a/sdk/dotnet/Outputs/GetSqlWarehouseTagsCustomTagResult.cs b/sdk/dotnet/Outputs/GetSqlWarehouseTagsCustomTagResult.cs index 3f968a91..9af53397 100644 --- a/sdk/dotnet/Outputs/GetSqlWarehouseTagsCustomTagResult.cs +++ b/sdk/dotnet/Outputs/GetSqlWarehouseTagsCustomTagResult.cs @@ -13,14 +13,14 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class GetSqlWarehouseTagsCustomTagResult { - public readonly string Key; - public readonly string Value; + public readonly string? Key; + public readonly string? 
Value; [OutputConstructor] private GetSqlWarehouseTagsCustomTagResult( - string key, + string? key, - string value) + string? value) { Key = key; Value = value; diff --git a/sdk/dotnet/Outputs/SqlEndpointChannel.cs b/sdk/dotnet/Outputs/SqlEndpointChannel.cs index 989659a9..1d96abed 100644 --- a/sdk/dotnet/Outputs/SqlEndpointChannel.cs +++ b/sdk/dotnet/Outputs/SqlEndpointChannel.cs @@ -13,14 +13,19 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class SqlEndpointChannel { + public readonly string? DbsqlVersion; /// /// Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. /// public readonly string? Name; [OutputConstructor] - private SqlEndpointChannel(string? name) + private SqlEndpointChannel( + string? dbsqlVersion, + + string? name) { + DbsqlVersion = dbsqlVersion; Name = name; } } diff --git a/sdk/dotnet/Outputs/SqlEndpointHealth.cs b/sdk/dotnet/Outputs/SqlEndpointHealth.cs new file mode 100644 index 00000000..bc578f03 --- /dev/null +++ b/sdk/dotnet/Outputs/SqlEndpointHealth.cs @@ -0,0 +1,41 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class SqlEndpointHealth + { + public readonly string? Details; + public readonly Outputs.SqlEndpointHealthFailureReason? FailureReason; + public readonly string? Message; + public readonly string? Status; + public readonly string? Summary; + + [OutputConstructor] + private SqlEndpointHealth( + string? details, + + Outputs.SqlEndpointHealthFailureReason? failureReason, + + string? message, + + string? status, + + string? 
summary) + { + Details = details; + FailureReason = failureReason; + Message = message; + Status = status; + Summary = summary; + } + } +} diff --git a/sdk/dotnet/Outputs/SqlEndpointHealthFailureReason.cs b/sdk/dotnet/Outputs/SqlEndpointHealthFailureReason.cs new file mode 100644 index 00000000..19e441c9 --- /dev/null +++ b/sdk/dotnet/Outputs/SqlEndpointHealthFailureReason.cs @@ -0,0 +1,33 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class SqlEndpointHealthFailureReason + { + public readonly string? Code; + public readonly ImmutableDictionary? Parameters; + public readonly string? Type; + + [OutputConstructor] + private SqlEndpointHealthFailureReason( + string? code, + + ImmutableDictionary? parameters, + + string? type) + { + Code = code; + Parameters = parameters; + Type = type; + } + } +} diff --git a/sdk/dotnet/Outputs/SqlEndpointOdbcParams.cs b/sdk/dotnet/Outputs/SqlEndpointOdbcParams.cs index 552a81ee..d5502ade 100644 --- a/sdk/dotnet/Outputs/SqlEndpointOdbcParams.cs +++ b/sdk/dotnet/Outputs/SqlEndpointOdbcParams.cs @@ -13,25 +13,21 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class SqlEndpointOdbcParams { - public readonly string? Host; public readonly string? Hostname; - public readonly string Path; - public readonly int Port; - public readonly string Protocol; + public readonly string? Path; + public readonly int? Port; + public readonly string? Protocol; [OutputConstructor] private SqlEndpointOdbcParams( - string? host, - string? hostname, - string path, + string? path, - int port, + int? port, - string protocol) + string? 
protocol) { - Host = host; Hostname = hostname; Path = path; Port = port; diff --git a/sdk/dotnet/Recipient.cs b/sdk/dotnet/Recipient.cs index 5768ebfe..7ea5af34 100644 --- a/sdk/dotnet/Recipient.cs +++ b/sdk/dotnet/Recipient.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// Within a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to. /// /// A `databricks.Recipient` is contained within databricks.Metastore and can have permissions to `SELECT` from a list of shares. diff --git a/sdk/dotnet/RegisteredModel.cs b/sdk/dotnet/RegisteredModel.cs index 72760d26..2be53fd4 100644 --- a/sdk/dotnet/RegisteredModel.cs +++ b/sdk/dotnet/RegisteredModel.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. /// /// ## Example Usage diff --git a/sdk/dotnet/Repo.cs b/sdk/dotnet/Repo.cs index 26cf19bc..e6affe10 100644 --- a/sdk/dotnet/Repo.cs +++ b/sdk/dotnet/Repo.cs @@ -60,6 +60,12 @@ public partial class Repo : global::Pulumi.CustomResource [Output("url")] public Output Url { get; private set; } = null!; + /// + /// path on Workspace File System (WSFS) in form of `/Workspace` + `path` + /// + [Output("workspacePath")] + public Output WorkspacePath { get; private set; } = null!; + /// /// Create a Repo resource with the given unique name, arguments, and options. @@ -192,6 +198,12 @@ public sealed class RepoState : global::Pulumi.ResourceArgs [Input("url")] public Input? Url { get; set; } + /// + /// path on Workspace File System (WSFS) in form of `/Workspace` + `path` + /// + [Input("workspacePath")] + public Input? 
WorkspacePath { get; set; } + public RepoState() { } diff --git a/sdk/dotnet/Schema.cs b/sdk/dotnet/Schema.cs index 27a3e07a..3ccd1222 100644 --- a/sdk/dotnet/Schema.cs +++ b/sdk/dotnet/Schema.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. /// /// A `databricks.Schema` is contained within databricks.Catalog and can contain tables & views. diff --git a/sdk/dotnet/SqlEndpoint.cs b/sdk/dotnet/SqlEndpoint.cs index b9e4af13..0b67abbe 100644 --- a/sdk/dotnet/SqlEndpoint.cs +++ b/sdk/dotnet/SqlEndpoint.cs @@ -87,6 +87,12 @@ public partial class SqlEndpoint : global::Pulumi.CustomResource [Output("clusterSize")] public Output ClusterSize { get; private set; } = null!; + /// + /// The username of the user who created the endpoint. + /// + [Output("creatorName")] + public Output CreatorName { get; private set; } = null!; + /// /// ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. /// @@ -109,6 +115,12 @@ public partial class SqlEndpoint : global::Pulumi.CustomResource [Output("enableServerlessCompute")] public Output EnableServerlessCompute { get; private set; } = null!; + /// + /// Health status of the endpoint. + /// + [Output("healths")] + public Output> Healths { get; private set; } = null!; + [Output("instanceProfileArn")] public Output InstanceProfileArn { get; private set; } = null!; @@ -136,8 +148,17 @@ public partial class SqlEndpoint : global::Pulumi.CustomResource [Output("name")] public Output Name { get; private set; } = null!; + /// + /// The current number of clusters used by the endpoint. + /// + [Output("numActiveSessions")] + public Output NumActiveSessions { get; private set; } = null!; + + /// + /// The current number of clusters used by the endpoint. 
+ /// [Output("numClusters")] - public Output NumClusters { get; private set; } = null!; + public Output NumClusters { get; private set; } = null!; /// /// ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -151,6 +172,9 @@ public partial class SqlEndpoint : global::Pulumi.CustomResource [Output("spotInstancePolicy")] public Output SpotInstancePolicy { get; private set; } = null!; + /// + /// The current state of the endpoint. + /// [Output("state")] public Output State { get; private set; } = null!; @@ -161,7 +185,7 @@ public partial class SqlEndpoint : global::Pulumi.CustomResource public Output Tags { get; private set; } = null!; /// - /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
/// [Output("warehouseType")] public Output WarehouseType { get; private set; } = null!; @@ -255,12 +279,6 @@ public sealed class SqlEndpointArgs : global::Pulumi.ResourceArgs [Input("instanceProfileArn")] public Input? InstanceProfileArn { get; set; } - /// - /// JDBC connection string. - /// - [Input("jdbcUrl")] - public Input? JdbcUrl { get; set; } - /// /// Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. /// @@ -279,24 +297,12 @@ public sealed class SqlEndpointArgs : global::Pulumi.ResourceArgs [Input("name")] public Input? Name { get; set; } - [Input("numClusters")] - public Input? NumClusters { get; set; } - - /// - /// ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. - /// - [Input("odbcParams")] - public Input? OdbcParams { get; set; } - /// /// The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. /// [Input("spotInstancePolicy")] public Input? SpotInstancePolicy { get; set; } - [Input("state")] - public Input? State { get; set; } - /// /// Databricks tags all endpoint resources with these tags. /// @@ -304,7 +310,7 @@ public sealed class SqlEndpointArgs : global::Pulumi.ResourceArgs public Input? Tags { get; set; } /// - /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. 
Otherwise, the default is `CLASSIC`. + /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. /// [Input("warehouseType")] public Input? WarehouseType { get; set; } @@ -335,6 +341,12 @@ public sealed class SqlEndpointState : global::Pulumi.ResourceArgs [Input("clusterSize")] public Input? ClusterSize { get; set; } + /// + /// The username of the user who created the endpoint. + /// + [Input("creatorName")] + public Input? CreatorName { get; set; } + /// /// ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. /// @@ -357,6 +369,18 @@ public sealed class SqlEndpointState : global::Pulumi.ResourceArgs [Input("enableServerlessCompute")] public Input? EnableServerlessCompute { get; set; } + [Input("healths")] + private InputList? _healths; + + /// + /// Health status of the endpoint. + /// + public InputList Healths + { + get => _healths ?? (_healths = new InputList()); + set => _healths = value; + } + [Input("instanceProfileArn")] public Input? InstanceProfileArn { get; set; } @@ -384,6 +408,15 @@ public sealed class SqlEndpointState : global::Pulumi.ResourceArgs [Input("name")] public Input? Name { get; set; } + /// + /// The current number of clusters used by the endpoint. + /// + [Input("numActiveSessions")] + public Input? NumActiveSessions { get; set; } + + /// + /// The current number of clusters used by the endpoint. + /// [Input("numClusters")] public Input? 
NumClusters { get; set; } @@ -399,6 +432,9 @@ public sealed class SqlEndpointState : global::Pulumi.ResourceArgs [Input("spotInstancePolicy")] public Input? SpotInstancePolicy { get; set; } + /// + /// The current state of the endpoint. + /// [Input("state")] public Input? State { get; set; } @@ -409,7 +445,7 @@ public sealed class SqlEndpointState : global::Pulumi.ResourceArgs public Input? Tags { get; set; } /// - /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + /// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. /// [Input("warehouseType")] public Input? 
WarehouseType { get; set; } diff --git a/sdk/dotnet/StorageCredential.cs b/sdk/dotnet/StorageCredential.cs index 7f326e65..8b11fe6f 100644 --- a/sdk/dotnet/StorageCredential.cs +++ b/sdk/dotnet/StorageCredential.cs @@ -10,6 +10,8 @@ namespace Pulumi.Databricks { /// + /// > **Note** This resource could be used with account or workspace-level provider. + /// /// To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: /// /// - `databricks.StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. @@ -191,6 +193,12 @@ public partial class StorageCredential : global::Pulumi.CustomResource [Output("readOnly")] public Output ReadOnly { get; private set; } = null!; + /// + /// Suppress validation errors if any & force save the storage credential. + /// + [Output("skipValidation")] + public Output SkipValidation { get; private set; } = null!; + /// /// Create a StorageCredential resource with the given unique name, arguments, and options. @@ -290,6 +298,12 @@ public sealed class StorageCredentialArgs : global::Pulumi.ResourceArgs [Input("readOnly")] public Input? ReadOnly { get; set; } + /// + /// Suppress validation errors if any & force save the storage credential. + /// + [Input("skipValidation")] + public Input? SkipValidation { get; set; } + public StorageCredentialArgs() { } @@ -351,6 +365,12 @@ public sealed class StorageCredentialState : global::Pulumi.ResourceArgs [Input("readOnly")] public Input? ReadOnly { get; set; } + /// + /// Suppress validation errors if any & force save the storage credential. + /// + [Input("skipValidation")] + public Input? 
SkipValidation { get; set; } + public StorageCredentialState() { } diff --git a/sdk/dotnet/SystemSchema.cs b/sdk/dotnet/SystemSchema.cs index 3b1bd608..e8592753 100644 --- a/sdk/dotnet/SystemSchema.cs +++ b/sdk/dotnet/SystemSchema.cs @@ -12,8 +12,7 @@ namespace Pulumi.Databricks /// /// > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). /// - /// > **Notes** - /// Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. + /// > **Note** This resource could be only used with workspace-level provider! /// /// Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. /// diff --git a/sdk/dotnet/Volume.cs b/sdk/dotnet/Volume.cs index df2fa459..48111b4c 100644 --- a/sdk/dotnet/Volume.cs +++ b/sdk/dotnet/Volume.cs @@ -12,6 +12,8 @@ namespace Pulumi.Databricks /// /// > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). /// + /// > **Note** This resource could be only used with workspace-level provider! + /// /// Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. /// /// A volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog. 
diff --git a/sdk/go/databricks/accessControlRuleSet.go b/sdk/go/databricks/accessControlRuleSet.go index c150abae..0cbf8f6c 100644 --- a/sdk/go/databricks/accessControlRuleSet.go +++ b/sdk/go/databricks/accessControlRuleSet.go @@ -11,6 +11,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be used with account or workspace-level provider. +// // This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. // // > **Note** Currently, we only support managing access rules on service principal, group and account resources through `AccessControlRuleSet`. diff --git a/sdk/go/databricks/connection.go b/sdk/go/databricks/connection.go index 776feee0..50f8960a 100644 --- a/sdk/go/databricks/connection.go +++ b/sdk/go/databricks/connection.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: // // - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. diff --git a/sdk/go/databricks/defaultNamespaceSetting.go b/sdk/go/databricks/defaultNamespaceSetting.go index 7e3945ee..6a35b8b4 100644 --- a/sdk/go/databricks/defaultNamespaceSetting.go +++ b/sdk/go/databricks/defaultNamespaceSetting.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // The `DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. 
// Setting the default catalog for the workspace determines the catalog that is used when queries do not reference // a fully qualified 3 level name. For example, if the default catalog is set to 'retail_prod' then a query diff --git a/sdk/go/databricks/directory.go b/sdk/go/databricks/directory.go index 9a3f59b1..0f879e30 100644 --- a/sdk/go/databricks/directory.go +++ b/sdk/go/databricks/directory.go @@ -29,6 +29,8 @@ type Directory struct { ObjectId pulumi.IntOutput `pulumi:"objectId"` // The absolute path of the directory, beginning with "/", e.g. "/Demo". Path pulumi.StringOutput `pulumi:"path"` + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath pulumi.StringOutput `pulumi:"workspacePath"` } // NewDirectory registers a new resource with the given unique name, arguments, and options. @@ -69,6 +71,8 @@ type directoryState struct { ObjectId *int `pulumi:"objectId"` // The absolute path of the directory, beginning with "/", e.g. "/Demo". Path *string `pulumi:"path"` + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath *string `pulumi:"workspacePath"` } type DirectoryState struct { @@ -77,6 +81,8 @@ type DirectoryState struct { ObjectId pulumi.IntPtrInput // The absolute path of the directory, beginning with "/", e.g. "/Demo". 
Path pulumi.StringPtrInput + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath pulumi.StringPtrInput } func (DirectoryState) ElementType() reflect.Type { @@ -201,6 +207,11 @@ func (o DirectoryOutput) Path() pulumi.StringOutput { return o.ApplyT(func(v *Directory) pulumi.StringOutput { return v.Path }).(pulumi.StringOutput) } +// path on Workspace File System (WSFS) in form of `/Workspace` + `path` +func (o DirectoryOutput) WorkspacePath() pulumi.StringOutput { + return o.ApplyT(func(v *Directory) pulumi.StringOutput { return v.WorkspacePath }).(pulumi.StringOutput) +} + type DirectoryArrayOutput struct{ *pulumi.OutputState } func (DirectoryArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/externalLocation.go b/sdk/go/databricks/externalLocation.go index 2d11f2a2..dee0cf90 100644 --- a/sdk/go/databricks/externalLocation.go +++ b/sdk/go/databricks/externalLocation.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: // // - StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. diff --git a/sdk/go/databricks/getCurrentMetastore.go b/sdk/go/databricks/getCurrentMetastore.go new file mode 100644 index 00000000..bb29b6b5 --- /dev/null +++ b/sdk/go/databricks/getCurrentMetastore.go @@ -0,0 +1,132 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! 
*** + +package databricks + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-databricks/sdk/go/databricks/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Retrieves information about metastore attached to a given workspace. +// +// > **Note** This is the workspace-level data source. +// +// > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add dependsOn attribute to prevent _authentication is not configured for provider_ errors. +// +// ## Example Usage +// +// MetastoreSummary response for a metastore attached to the current workspace. +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := databricks.GetCurrentMetastore(ctx, nil, nil) +// if err != nil { +// return err +// } +// ctx.Export("someMetastore", data.Databricks_metastore.This.Metastore_info[0]) +// return nil +// }) +// } +// +// ``` +// ## Related Resources +// +// The following resources are used in the same context: +// +// * Metastore to get information for a metastore with a given ID. +// * getMetastores to get a mapping of name to id of all metastores. +// * Metastore to manage Metastores within Unity Catalog. +// * Catalog to manage catalogs within Unity Catalog. +func GetCurrentMetastore(ctx *pulumi.Context, args *GetCurrentMetastoreArgs, opts ...pulumi.InvokeOption) (*GetCurrentMetastoreResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv GetCurrentMetastoreResult + err := ctx.Invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +// A collection of arguments for invoking getCurrentMetastore. +type GetCurrentMetastoreArgs struct { + // metastore ID. 
+ Id *string `pulumi:"id"` + // summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + MetastoreInfo *GetCurrentMetastoreMetastoreInfo `pulumi:"metastoreInfo"` +} + +// A collection of values returned by getCurrentMetastore. +type GetCurrentMetastoreResult struct { + // metastore ID. + Id string `pulumi:"id"` + // summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + MetastoreInfo GetCurrentMetastoreMetastoreInfo `pulumi:"metastoreInfo"` +} + +func GetCurrentMetastoreOutput(ctx *pulumi.Context, args GetCurrentMetastoreOutputArgs, opts ...pulumi.InvokeOption) GetCurrentMetastoreResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (GetCurrentMetastoreResult, error) { + args := v.(GetCurrentMetastoreArgs) + r, err := GetCurrentMetastore(ctx, &args, opts...) + var s GetCurrentMetastoreResult + if r != nil { + s = *r + } + return s, err + }).(GetCurrentMetastoreResultOutput) +} + +// A collection of arguments for invoking getCurrentMetastore. +type GetCurrentMetastoreOutputArgs struct { + // metastore ID. + Id pulumi.StringPtrInput `pulumi:"id"` + // summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + MetastoreInfo GetCurrentMetastoreMetastoreInfoPtrInput `pulumi:"metastoreInfo"` +} + +func (GetCurrentMetastoreOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCurrentMetastoreArgs)(nil)).Elem() +} + +// A collection of values returned by getCurrentMetastore. +type GetCurrentMetastoreResultOutput struct{ *pulumi.OutputState } + +func (GetCurrentMetastoreResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCurrentMetastoreResult)(nil)).Elem() +} + +func (o GetCurrentMetastoreResultOutput) ToGetCurrentMetastoreResultOutput() GetCurrentMetastoreResultOutput { + return o +} + +func (o GetCurrentMetastoreResultOutput) ToGetCurrentMetastoreResultOutputWithContext(ctx context.Context) GetCurrentMetastoreResultOutput { + return o +} + +// metastore ID. +func (o GetCurrentMetastoreResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetCurrentMetastoreResult) string { return v.Id }).(pulumi.StringOutput) +} + +// summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): +func (o GetCurrentMetastoreResultOutput) MetastoreInfo() GetCurrentMetastoreMetastoreInfoOutput { + return o.ApplyT(func(v GetCurrentMetastoreResult) GetCurrentMetastoreMetastoreInfo { return v.MetastoreInfo }).(GetCurrentMetastoreMetastoreInfoOutput) +} + +func init() { + pulumi.RegisterOutputType(GetCurrentMetastoreResultOutput{}) +} diff --git a/sdk/go/databricks/getDirectory.go b/sdk/go/databricks/getDirectory.go index c7c2b4e4..7de1344a 100644 --- a/sdk/go/databricks/getDirectory.go +++ b/sdk/go/databricks/getDirectory.go @@ -65,6 +65,8 @@ type LookupDirectoryResult struct { // directory object ID ObjectId int `pulumi:"objectId"` Path string `pulumi:"path"` + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath string `pulumi:"workspacePath"` } func LookupDirectoryOutput(ctx *pulumi.Context, args LookupDirectoryOutputArgs, opts ...pulumi.InvokeOption) LookupDirectoryResultOutput { @@ -121,6 +123,11 @@ func (o LookupDirectoryResultOutput) Path() pulumi.StringOutput { return o.ApplyT(func(v LookupDirectoryResult) string { return v.Path }).(pulumi.StringOutput) } +// path on Workspace File System (WSFS) in form of `/Workspace` + `path` +func (o LookupDirectoryResultOutput) WorkspacePath() pulumi.StringOutput { + return o.ApplyT(func(v LookupDirectoryResult) string { return v.WorkspacePath }).(pulumi.StringOutput) +} + func init() { pulumi.RegisterOutputType(LookupDirectoryResultOutput{}) } diff --git a/sdk/go/databricks/getServicePrincipal.go b/sdk/go/databricks/getServicePrincipal.go index af57ec18..1a64bfce 100644 --- a/sdk/go/databricks/getServicePrincipal.go +++ b/sdk/go/databricks/getServicePrincipal.go @@ -85,7 +85,7 @@ type LookupServicePrincipalArgs struct { Active *bool `pulumi:"active"` // ID of the service principal. The service principal must exist before this resource can be retrieved. 
ApplicationId *string `pulumi:"applicationId"` - // Display name of the service principal, e.g. `Foo SPN`. + // Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. DisplayName *string `pulumi:"displayName"` // ID of the service principal in an external identity provider. ExternalId *string `pulumi:"externalId"` @@ -139,7 +139,7 @@ type LookupServicePrincipalOutputArgs struct { Active pulumi.BoolPtrInput `pulumi:"active"` // ID of the service principal. The service principal must exist before this resource can be retrieved. ApplicationId pulumi.StringPtrInput `pulumi:"applicationId"` - // Display name of the service principal, e.g. `Foo SPN`. + // Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. DisplayName pulumi.StringPtrInput `pulumi:"displayName"` // ID of the service principal in an external identity provider. ExternalId pulumi.StringPtrInput `pulumi:"externalId"` diff --git a/sdk/go/databricks/getSqlWarehouse.go b/sdk/go/databricks/getSqlWarehouse.go index a3376840..06d15111 100644 --- a/sdk/go/databricks/getSqlWarehouse.go +++ b/sdk/go/databricks/getSqlWarehouse.go @@ -94,12 +94,16 @@ type GetSqlWarehouseArgs struct { Channel *GetSqlWarehouseChannel `pulumi:"channel"` // The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". ClusterSize *string `pulumi:"clusterSize"` + // The username of the user who created the endpoint. + CreatorName *string `pulumi:"creatorName"` // ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. 
DataSourceId *string `pulumi:"dataSourceId"` // Whether [Photon](https://databricks.com/product/delta-engine) is enabled. EnablePhoton *bool `pulumi:"enablePhoton"` // Whether this SQL warehouse is a serverless SQL warehouse. EnableServerlessCompute *bool `pulumi:"enableServerlessCompute"` + // Health status of the endpoint. + Health *GetSqlWarehouseHealth `pulumi:"health"` // The ID of the SQL warehouse. Id *string `pulumi:"id"` InstanceProfileArn *string `pulumi:"instanceProfileArn"` @@ -110,15 +114,21 @@ type GetSqlWarehouseArgs struct { // Minimum number of clusters available when a SQL warehouse is running. MinNumClusters *int `pulumi:"minNumClusters"` // Name of the SQL warehouse to search (case-sensitive). - Name *string `pulumi:"name"` - NumClusters *int `pulumi:"numClusters"` + Name *string `pulumi:"name"` + // The current number of clusters used by the endpoint. + NumActiveSessions *int `pulumi:"numActiveSessions"` + // The current number of clusters used by the endpoint. + NumClusters *int `pulumi:"numClusters"` // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. OdbcParams *GetSqlWarehouseOdbcParams `pulumi:"odbcParams"` // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. SpotInstancePolicy *string `pulumi:"spotInstancePolicy"` - State *string `pulumi:"state"` + // The current state of the endpoint. + State *string `pulumi:"state"` // tags used for SQL warehouse resources. Tags *GetSqlWarehouseTags `pulumi:"tags"` + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + WarehouseType *string `pulumi:"warehouseType"` } // A collection of values returned by getSqlWarehouse. 
@@ -129,12 +139,16 @@ type GetSqlWarehouseResult struct { Channel GetSqlWarehouseChannel `pulumi:"channel"` // The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". ClusterSize string `pulumi:"clusterSize"` + // The username of the user who created the endpoint. + CreatorName string `pulumi:"creatorName"` // ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. DataSourceId string `pulumi:"dataSourceId"` // Whether [Photon](https://databricks.com/product/delta-engine) is enabled. EnablePhoton bool `pulumi:"enablePhoton"` // Whether this SQL warehouse is a serverless SQL warehouse. EnableServerlessCompute bool `pulumi:"enableServerlessCompute"` + // Health status of the endpoint. + Health GetSqlWarehouseHealth `pulumi:"health"` // The ID of the SQL warehouse. Id string `pulumi:"id"` InstanceProfileArn string `pulumi:"instanceProfileArn"` @@ -145,15 +159,21 @@ type GetSqlWarehouseResult struct { // Minimum number of clusters available when a SQL warehouse is running. MinNumClusters int `pulumi:"minNumClusters"` // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - Name string `pulumi:"name"` - NumClusters int `pulumi:"numClusters"` + Name string `pulumi:"name"` + // The current number of clusters used by the endpoint. + NumActiveSessions int `pulumi:"numActiveSessions"` + // The current number of clusters used by the endpoint. + NumClusters int `pulumi:"numClusters"` // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. OdbcParams GetSqlWarehouseOdbcParams `pulumi:"odbcParams"` // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. 
SpotInstancePolicy string `pulumi:"spotInstancePolicy"` - State string `pulumi:"state"` + // The current state of the endpoint. + State string `pulumi:"state"` // tags used for SQL warehouse resources. Tags GetSqlWarehouseTags `pulumi:"tags"` + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + WarehouseType string `pulumi:"warehouseType"` } func GetSqlWarehouseOutput(ctx *pulumi.Context, args GetSqlWarehouseOutputArgs, opts ...pulumi.InvokeOption) GetSqlWarehouseResultOutput { @@ -177,12 +197,16 @@ type GetSqlWarehouseOutputArgs struct { Channel GetSqlWarehouseChannelPtrInput `pulumi:"channel"` // The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". ClusterSize pulumi.StringPtrInput `pulumi:"clusterSize"` + // The username of the user who created the endpoint. + CreatorName pulumi.StringPtrInput `pulumi:"creatorName"` // ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. DataSourceId pulumi.StringPtrInput `pulumi:"dataSourceId"` // Whether [Photon](https://databricks.com/product/delta-engine) is enabled. EnablePhoton pulumi.BoolPtrInput `pulumi:"enablePhoton"` // Whether this SQL warehouse is a serverless SQL warehouse. EnableServerlessCompute pulumi.BoolPtrInput `pulumi:"enableServerlessCompute"` + // Health status of the endpoint. + Health GetSqlWarehouseHealthPtrInput `pulumi:"health"` // The ID of the SQL warehouse. Id pulumi.StringPtrInput `pulumi:"id"` InstanceProfileArn pulumi.StringPtrInput `pulumi:"instanceProfileArn"` @@ -193,15 +217,21 @@ type GetSqlWarehouseOutputArgs struct { // Minimum number of clusters available when a SQL warehouse is running. MinNumClusters pulumi.IntPtrInput `pulumi:"minNumClusters"` // Name of the SQL warehouse to search (case-sensitive). 
- Name pulumi.StringPtrInput `pulumi:"name"` - NumClusters pulumi.IntPtrInput `pulumi:"numClusters"` + Name pulumi.StringPtrInput `pulumi:"name"` + // The current number of clusters used by the endpoint. + NumActiveSessions pulumi.IntPtrInput `pulumi:"numActiveSessions"` + // The current number of clusters used by the endpoint. + NumClusters pulumi.IntPtrInput `pulumi:"numClusters"` // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. OdbcParams GetSqlWarehouseOdbcParamsPtrInput `pulumi:"odbcParams"` // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. SpotInstancePolicy pulumi.StringPtrInput `pulumi:"spotInstancePolicy"` - State pulumi.StringPtrInput `pulumi:"state"` + // The current state of the endpoint. + State pulumi.StringPtrInput `pulumi:"state"` // tags used for SQL warehouse resources. Tags GetSqlWarehouseTagsPtrInput `pulumi:"tags"` + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + WarehouseType pulumi.StringPtrInput `pulumi:"warehouseType"` } func (GetSqlWarehouseOutputArgs) ElementType() reflect.Type { @@ -238,6 +268,11 @@ func (o GetSqlWarehouseResultOutput) ClusterSize() pulumi.StringOutput { return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.ClusterSize }).(pulumi.StringOutput) } +// The username of the user who created the endpoint. +func (o GetSqlWarehouseResultOutput) CreatorName() pulumi.StringOutput { + return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.CreatorName }).(pulumi.StringOutput) +} + // ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. 
func (o GetSqlWarehouseResultOutput) DataSourceId() pulumi.StringOutput { return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.DataSourceId }).(pulumi.StringOutput) @@ -253,6 +288,11 @@ func (o GetSqlWarehouseResultOutput) EnableServerlessCompute() pulumi.BoolOutput return o.ApplyT(func(v GetSqlWarehouseResult) bool { return v.EnableServerlessCompute }).(pulumi.BoolOutput) } +// Health status of the endpoint. +func (o GetSqlWarehouseResultOutput) Health() GetSqlWarehouseHealthOutput { + return o.ApplyT(func(v GetSqlWarehouseResult) GetSqlWarehouseHealth { return v.Health }).(GetSqlWarehouseHealthOutput) +} + // The ID of the SQL warehouse. func (o GetSqlWarehouseResultOutput) Id() pulumi.StringOutput { return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.Id }).(pulumi.StringOutput) @@ -282,6 +322,12 @@ func (o GetSqlWarehouseResultOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.Name }).(pulumi.StringOutput) } +// The current number of clusters used by the endpoint. +func (o GetSqlWarehouseResultOutput) NumActiveSessions() pulumi.IntOutput { + return o.ApplyT(func(v GetSqlWarehouseResult) int { return v.NumActiveSessions }).(pulumi.IntOutput) +} + +// The current number of clusters used by the endpoint. func (o GetSqlWarehouseResultOutput) NumClusters() pulumi.IntOutput { return o.ApplyT(func(v GetSqlWarehouseResult) int { return v.NumClusters }).(pulumi.IntOutput) } @@ -296,6 +342,7 @@ func (o GetSqlWarehouseResultOutput) SpotInstancePolicy() pulumi.StringOutput { return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.SpotInstancePolicy }).(pulumi.StringOutput) } +// The current state of the endpoint. 
func (o GetSqlWarehouseResultOutput) State() pulumi.StringOutput { return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.State }).(pulumi.StringOutput) } @@ -305,6 +352,11 @@ func (o GetSqlWarehouseResultOutput) Tags() GetSqlWarehouseTagsOutput { return o.ApplyT(func(v GetSqlWarehouseResult) GetSqlWarehouseTags { return v.Tags }).(GetSqlWarehouseTagsOutput) } +// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). +func (o GetSqlWarehouseResultOutput) WarehouseType() pulumi.StringOutput { + return o.ApplyT(func(v GetSqlWarehouseResult) string { return v.WarehouseType }).(pulumi.StringOutput) +} + func init() { pulumi.RegisterOutputType(GetSqlWarehouseResultOutput{}) } diff --git a/sdk/go/databricks/grant.go b/sdk/go/databricks/grant.go new file mode 100644 index 00000000..f367360f --- /dev/null +++ b/sdk/go/databricks/grant.go @@ -0,0 +1,341 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! 
*** + +package databricks + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-databricks/sdk/go/databricks/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +type Grant struct { + pulumi.CustomResourceState + + Catalog pulumi.StringPtrOutput `pulumi:"catalog"` + ExternalLocation pulumi.StringPtrOutput `pulumi:"externalLocation"` + ForeignConnection pulumi.StringPtrOutput `pulumi:"foreignConnection"` + Function pulumi.StringPtrOutput `pulumi:"function"` + Metastore pulumi.StringPtrOutput `pulumi:"metastore"` + Model pulumi.StringPtrOutput `pulumi:"model"` + Pipeline pulumi.StringPtrOutput `pulumi:"pipeline"` + Principal pulumi.StringOutput `pulumi:"principal"` + Privileges pulumi.StringArrayOutput `pulumi:"privileges"` + Recipient pulumi.StringPtrOutput `pulumi:"recipient"` + Schema pulumi.StringPtrOutput `pulumi:"schema"` + Share pulumi.StringPtrOutput `pulumi:"share"` + StorageCredential pulumi.StringPtrOutput `pulumi:"storageCredential"` + Table pulumi.StringPtrOutput `pulumi:"table"` + Volume pulumi.StringPtrOutput `pulumi:"volume"` +} + +// NewGrant registers a new resource with the given unique name, arguments, and options. +func NewGrant(ctx *pulumi.Context, + name string, args *GrantArgs, opts ...pulumi.ResourceOption) (*Grant, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.Principal == nil { + return nil, errors.New("invalid value for required argument 'Principal'") + } + if args.Privileges == nil { + return nil, errors.New("invalid value for required argument 'Privileges'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource Grant + err := ctx.RegisterResource("databricks:index/grant:Grant", name, args, &resource, opts...) 
+ if err != nil { + return nil, err + } + return &resource, nil +} + +// GetGrant gets an existing Grant resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetGrant(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *GrantState, opts ...pulumi.ResourceOption) (*Grant, error) { + var resource Grant + err := ctx.ReadResource("databricks:index/grant:Grant", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering Grant resources. +type grantState struct { + Catalog *string `pulumi:"catalog"` + ExternalLocation *string `pulumi:"externalLocation"` + ForeignConnection *string `pulumi:"foreignConnection"` + Function *string `pulumi:"function"` + Metastore *string `pulumi:"metastore"` + Model *string `pulumi:"model"` + Pipeline *string `pulumi:"pipeline"` + Principal *string `pulumi:"principal"` + Privileges []string `pulumi:"privileges"` + Recipient *string `pulumi:"recipient"` + Schema *string `pulumi:"schema"` + Share *string `pulumi:"share"` + StorageCredential *string `pulumi:"storageCredential"` + Table *string `pulumi:"table"` + Volume *string `pulumi:"volume"` +} + +type GrantState struct { + Catalog pulumi.StringPtrInput + ExternalLocation pulumi.StringPtrInput + ForeignConnection pulumi.StringPtrInput + Function pulumi.StringPtrInput + Metastore pulumi.StringPtrInput + Model pulumi.StringPtrInput + Pipeline pulumi.StringPtrInput + Principal pulumi.StringPtrInput + Privileges pulumi.StringArrayInput + Recipient pulumi.StringPtrInput + Schema pulumi.StringPtrInput + Share pulumi.StringPtrInput + StorageCredential pulumi.StringPtrInput + Table pulumi.StringPtrInput + Volume pulumi.StringPtrInput +} + +func (GrantState) ElementType() reflect.Type { + return reflect.TypeOf((*grantState)(nil)).Elem() +} + +type grantArgs struct { + Catalog *string 
`pulumi:"catalog"` + ExternalLocation *string `pulumi:"externalLocation"` + ForeignConnection *string `pulumi:"foreignConnection"` + Function *string `pulumi:"function"` + Metastore *string `pulumi:"metastore"` + Model *string `pulumi:"model"` + Pipeline *string `pulumi:"pipeline"` + Principal string `pulumi:"principal"` + Privileges []string `pulumi:"privileges"` + Recipient *string `pulumi:"recipient"` + Schema *string `pulumi:"schema"` + Share *string `pulumi:"share"` + StorageCredential *string `pulumi:"storageCredential"` + Table *string `pulumi:"table"` + Volume *string `pulumi:"volume"` +} + +// The set of arguments for constructing a Grant resource. +type GrantArgs struct { + Catalog pulumi.StringPtrInput + ExternalLocation pulumi.StringPtrInput + ForeignConnection pulumi.StringPtrInput + Function pulumi.StringPtrInput + Metastore pulumi.StringPtrInput + Model pulumi.StringPtrInput + Pipeline pulumi.StringPtrInput + Principal pulumi.StringInput + Privileges pulumi.StringArrayInput + Recipient pulumi.StringPtrInput + Schema pulumi.StringPtrInput + Share pulumi.StringPtrInput + StorageCredential pulumi.StringPtrInput + Table pulumi.StringPtrInput + Volume pulumi.StringPtrInput +} + +func (GrantArgs) ElementType() reflect.Type { + return reflect.TypeOf((*grantArgs)(nil)).Elem() +} + +type GrantInput interface { + pulumi.Input + + ToGrantOutput() GrantOutput + ToGrantOutputWithContext(ctx context.Context) GrantOutput +} + +func (*Grant) ElementType() reflect.Type { + return reflect.TypeOf((**Grant)(nil)).Elem() +} + +func (i *Grant) ToGrantOutput() GrantOutput { + return i.ToGrantOutputWithContext(context.Background()) +} + +func (i *Grant) ToGrantOutputWithContext(ctx context.Context) GrantOutput { + return pulumi.ToOutputWithContext(ctx, i).(GrantOutput) +} + +// GrantArrayInput is an input type that accepts GrantArray and GrantArrayOutput values. 
+// You can construct a concrete instance of `GrantArrayInput` via: +// +// GrantArray{ GrantArgs{...} } +type GrantArrayInput interface { + pulumi.Input + + ToGrantArrayOutput() GrantArrayOutput + ToGrantArrayOutputWithContext(context.Context) GrantArrayOutput +} + +type GrantArray []GrantInput + +func (GrantArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*Grant)(nil)).Elem() +} + +func (i GrantArray) ToGrantArrayOutput() GrantArrayOutput { + return i.ToGrantArrayOutputWithContext(context.Background()) +} + +func (i GrantArray) ToGrantArrayOutputWithContext(ctx context.Context) GrantArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GrantArrayOutput) +} + +// GrantMapInput is an input type that accepts GrantMap and GrantMapOutput values. +// You can construct a concrete instance of `GrantMapInput` via: +// +// GrantMap{ "key": GrantArgs{...} } +type GrantMapInput interface { + pulumi.Input + + ToGrantMapOutput() GrantMapOutput + ToGrantMapOutputWithContext(context.Context) GrantMapOutput +} + +type GrantMap map[string]GrantInput + +func (GrantMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*Grant)(nil)).Elem() +} + +func (i GrantMap) ToGrantMapOutput() GrantMapOutput { + return i.ToGrantMapOutputWithContext(context.Background()) +} + +func (i GrantMap) ToGrantMapOutputWithContext(ctx context.Context) GrantMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(GrantMapOutput) +} + +type GrantOutput struct{ *pulumi.OutputState } + +func (GrantOutput) ElementType() reflect.Type { + return reflect.TypeOf((**Grant)(nil)).Elem() +} + +func (o GrantOutput) ToGrantOutput() GrantOutput { + return o +} + +func (o GrantOutput) ToGrantOutputWithContext(ctx context.Context) GrantOutput { + return o +} + +func (o GrantOutput) Catalog() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Catalog }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) ExternalLocation() pulumi.StringPtrOutput 
{ + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.ExternalLocation }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) ForeignConnection() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.ForeignConnection }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Function() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Function }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Metastore() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Metastore }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Model() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Model }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Pipeline() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Pipeline }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Principal() pulumi.StringOutput { + return o.ApplyT(func(v *Grant) pulumi.StringOutput { return v.Principal }).(pulumi.StringOutput) +} + +func (o GrantOutput) Privileges() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Grant) pulumi.StringArrayOutput { return v.Privileges }).(pulumi.StringArrayOutput) +} + +func (o GrantOutput) Recipient() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Recipient }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Schema() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Schema }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Share() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Share }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) StorageCredential() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.StorageCredential }).(pulumi.StringPtrOutput) +} + +func (o 
GrantOutput) Table() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Table }).(pulumi.StringPtrOutput) +} + +func (o GrantOutput) Volume() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grant) pulumi.StringPtrOutput { return v.Volume }).(pulumi.StringPtrOutput) +} + +type GrantArrayOutput struct{ *pulumi.OutputState } + +func (GrantArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*Grant)(nil)).Elem() +} + +func (o GrantArrayOutput) ToGrantArrayOutput() GrantArrayOutput { + return o +} + +func (o GrantArrayOutput) ToGrantArrayOutputWithContext(ctx context.Context) GrantArrayOutput { + return o +} + +func (o GrantArrayOutput) Index(i pulumi.IntInput) GrantOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *Grant { + return vs[0].([]*Grant)[vs[1].(int)] + }).(GrantOutput) +} + +type GrantMapOutput struct{ *pulumi.OutputState } + +func (GrantMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*Grant)(nil)).Elem() +} + +func (o GrantMapOutput) ToGrantMapOutput() GrantMapOutput { + return o +} + +func (o GrantMapOutput) ToGrantMapOutputWithContext(ctx context.Context) GrantMapOutput { + return o +} + +func (o GrantMapOutput) MapIndex(k pulumi.StringInput) GrantOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *Grant { + return vs[0].(map[string]*Grant)[vs[1].(string)] + }).(GrantOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*GrantInput)(nil)).Elem(), &Grant{}) + pulumi.RegisterInputType(reflect.TypeOf((*GrantArrayInput)(nil)).Elem(), GrantArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GrantMapInput)(nil)).Elem(), GrantMap{}) + pulumi.RegisterOutputType(GrantOutput{}) + pulumi.RegisterOutputType(GrantArrayOutput{}) + pulumi.RegisterOutputType(GrantMapOutput{}) +} diff --git a/sdk/go/databricks/init.go b/sdk/go/databricks/init.go index 17817acc..7a5eb2ac 100644 --- a/sdk/go/databricks/init.go +++ 
b/sdk/go/databricks/init.go @@ -49,6 +49,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &GitCredential{} case "databricks:index/globalInitScript:GlobalInitScript": r = &GlobalInitScript{} + case "databricks:index/grant:Grant": + r = &Grant{} case "databricks:index/grants:Grants": r = &Grants{} case "databricks:index/group:Group": @@ -276,6 +278,11 @@ func init() { "index/globalInitScript", &module{version}, ) + pulumi.RegisterResourceModule( + "databricks", + "index/grant", + &module{version}, + ) pulumi.RegisterResourceModule( "databricks", "index/grants", diff --git a/sdk/go/databricks/metastore.go b/sdk/go/databricks/metastore.go index c6237d83..f119ff6f 100644 --- a/sdk/go/databricks/metastore.go +++ b/sdk/go/databricks/metastore.go @@ -11,6 +11,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be used with account or workspace-level provider. +// // A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. // // Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). diff --git a/sdk/go/databricks/metastoreAssignment.go b/sdk/go/databricks/metastoreAssignment.go index 5acc2f26..c3cf303f 100644 --- a/sdk/go/databricks/metastoreAssignment.go +++ b/sdk/go/databricks/metastoreAssignment.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with account-level provider! +// // A single Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. 
You can only create a single metastore for each region in which your organization operates. // // ## Example Usage diff --git a/sdk/go/databricks/metastoreDataAccess.go b/sdk/go/databricks/metastoreDataAccess.go index e4465270..ffc50591 100644 --- a/sdk/go/databricks/metastoreDataAccess.go +++ b/sdk/go/databricks/metastoreDataAccess.go @@ -11,6 +11,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be used with account or workspace-level provider. +// // Optionally, each Metastore can have a default StorageCredential defined as `MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined. // // ## Import @@ -34,11 +36,12 @@ type MetastoreDataAccess struct { ForceUpdate pulumi.BoolPtrOutput `pulumi:"forceUpdate"` GcpServiceAccountKey MetastoreDataAccessGcpServiceAccountKeyPtrOutput `pulumi:"gcpServiceAccountKey"` // whether to set this credential as the default for the metastore. In practice, this should always be true. - IsDefault pulumi.BoolPtrOutput `pulumi:"isDefault"` - MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` - Name pulumi.StringOutput `pulumi:"name"` - Owner pulumi.StringOutput `pulumi:"owner"` - ReadOnly pulumi.BoolPtrOutput `pulumi:"readOnly"` + IsDefault pulumi.BoolPtrOutput `pulumi:"isDefault"` + MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` + Name pulumi.StringOutput `pulumi:"name"` + Owner pulumi.StringOutput `pulumi:"owner"` + ReadOnly pulumi.BoolPtrOutput `pulumi:"readOnly"` + SkipValidation pulumi.BoolPtrOutput `pulumi:"skipValidation"` } // NewMetastoreDataAccess registers a new resource with the given unique name, arguments, and options. @@ -80,11 +83,12 @@ type metastoreDataAccessState struct { ForceUpdate *bool `pulumi:"forceUpdate"` GcpServiceAccountKey *MetastoreDataAccessGcpServiceAccountKey `pulumi:"gcpServiceAccountKey"` // whether to set this credential as the default for the metastore. 
In practice, this should always be true. - IsDefault *bool `pulumi:"isDefault"` - MetastoreId *string `pulumi:"metastoreId"` - Name *string `pulumi:"name"` - Owner *string `pulumi:"owner"` - ReadOnly *bool `pulumi:"readOnly"` + IsDefault *bool `pulumi:"isDefault"` + MetastoreId *string `pulumi:"metastoreId"` + Name *string `pulumi:"name"` + Owner *string `pulumi:"owner"` + ReadOnly *bool `pulumi:"readOnly"` + SkipValidation *bool `pulumi:"skipValidation"` } type MetastoreDataAccessState struct { @@ -97,11 +101,12 @@ type MetastoreDataAccessState struct { ForceUpdate pulumi.BoolPtrInput GcpServiceAccountKey MetastoreDataAccessGcpServiceAccountKeyPtrInput // whether to set this credential as the default for the metastore. In practice, this should always be true. - IsDefault pulumi.BoolPtrInput - MetastoreId pulumi.StringPtrInput - Name pulumi.StringPtrInput - Owner pulumi.StringPtrInput - ReadOnly pulumi.BoolPtrInput + IsDefault pulumi.BoolPtrInput + MetastoreId pulumi.StringPtrInput + Name pulumi.StringPtrInput + Owner pulumi.StringPtrInput + ReadOnly pulumi.BoolPtrInput + SkipValidation pulumi.BoolPtrInput } func (MetastoreDataAccessState) ElementType() reflect.Type { @@ -118,11 +123,12 @@ type metastoreDataAccessArgs struct { ForceUpdate *bool `pulumi:"forceUpdate"` GcpServiceAccountKey *MetastoreDataAccessGcpServiceAccountKey `pulumi:"gcpServiceAccountKey"` // whether to set this credential as the default for the metastore. In practice, this should always be true. - IsDefault *bool `pulumi:"isDefault"` - MetastoreId *string `pulumi:"metastoreId"` - Name *string `pulumi:"name"` - Owner *string `pulumi:"owner"` - ReadOnly *bool `pulumi:"readOnly"` + IsDefault *bool `pulumi:"isDefault"` + MetastoreId *string `pulumi:"metastoreId"` + Name *string `pulumi:"name"` + Owner *string `pulumi:"owner"` + ReadOnly *bool `pulumi:"readOnly"` + SkipValidation *bool `pulumi:"skipValidation"` } // The set of arguments for constructing a MetastoreDataAccess resource. 
@@ -136,11 +142,12 @@ type MetastoreDataAccessArgs struct { ForceUpdate pulumi.BoolPtrInput GcpServiceAccountKey MetastoreDataAccessGcpServiceAccountKeyPtrInput // whether to set this credential as the default for the metastore. In practice, this should always be true. - IsDefault pulumi.BoolPtrInput - MetastoreId pulumi.StringPtrInput - Name pulumi.StringPtrInput - Owner pulumi.StringPtrInput - ReadOnly pulumi.BoolPtrInput + IsDefault pulumi.BoolPtrInput + MetastoreId pulumi.StringPtrInput + Name pulumi.StringPtrInput + Owner pulumi.StringPtrInput + ReadOnly pulumi.BoolPtrInput + SkipValidation pulumi.BoolPtrInput } func (MetastoreDataAccessArgs) ElementType() reflect.Type { @@ -291,6 +298,10 @@ func (o MetastoreDataAccessOutput) ReadOnly() pulumi.BoolPtrOutput { return o.ApplyT(func(v *MetastoreDataAccess) pulumi.BoolPtrOutput { return v.ReadOnly }).(pulumi.BoolPtrOutput) } +func (o MetastoreDataAccessOutput) SkipValidation() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *MetastoreDataAccess) pulumi.BoolPtrOutput { return v.SkipValidation }).(pulumi.BoolPtrOutput) +} + type MetastoreDataAccessArrayOutput struct{ *pulumi.OutputState } func (MetastoreDataAccessArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/metastoreProvider.go b/sdk/go/databricks/metastoreProvider.go index 71d535a7..c7ba071f 100644 --- a/sdk/go/databricks/metastoreProvider.go +++ b/sdk/go/databricks/metastoreProvider.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. // // A `MetastoreProvider` is contained within Metastore and can contain a list of shares that have been shared with you. 
diff --git a/sdk/go/databricks/pulumiTypes.go b/sdk/go/databricks/pulumiTypes.go index 3ade34ea..5845f2f5 100644 --- a/sdk/go/databricks/pulumiTypes.go +++ b/sdk/go/databricks/pulumiTypes.go @@ -40343,6 +40343,7 @@ func (o SqlAlertOptionsPtrOutput) Value() pulumi.StringPtrOutput { } type SqlEndpointChannel struct { + DbsqlVersion *string `pulumi:"dbsqlVersion"` // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. Name *string `pulumi:"name"` } @@ -40359,6 +40360,7 @@ type SqlEndpointChannelInput interface { } type SqlEndpointChannelArgs struct { + DbsqlVersion pulumi.StringPtrInput `pulumi:"dbsqlVersion"` // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. Name pulumi.StringPtrInput `pulumi:"name"` } @@ -40440,6 +40442,10 @@ func (o SqlEndpointChannelOutput) ToSqlEndpointChannelPtrOutputWithContext(ctx c }).(SqlEndpointChannelPtrOutput) } +func (o SqlEndpointChannelOutput) DbsqlVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointChannel) *string { return v.DbsqlVersion }).(pulumi.StringPtrOutput) +} + // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. func (o SqlEndpointChannelOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v SqlEndpointChannel) *string { return v.Name }).(pulumi.StringPtrOutput) @@ -40469,6 +40475,15 @@ func (o SqlEndpointChannelPtrOutput) Elem() SqlEndpointChannelOutput { }).(SqlEndpointChannelOutput) } +func (o SqlEndpointChannelPtrOutput) DbsqlVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SqlEndpointChannel) *string { + if v == nil { + return nil + } + return v.DbsqlVersion + }).(pulumi.StringPtrOutput) +} + // Name of the Databricks SQL release channel. 
Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. func (o SqlEndpointChannelPtrOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v *SqlEndpointChannel) *string { @@ -40479,12 +40494,292 @@ func (o SqlEndpointChannelPtrOutput) Name() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +type SqlEndpointHealth struct { + Details *string `pulumi:"details"` + FailureReason *SqlEndpointHealthFailureReason `pulumi:"failureReason"` + Message *string `pulumi:"message"` + Status *string `pulumi:"status"` + Summary *string `pulumi:"summary"` +} + +// SqlEndpointHealthInput is an input type that accepts SqlEndpointHealthArgs and SqlEndpointHealthOutput values. +// You can construct a concrete instance of `SqlEndpointHealthInput` via: +// +// SqlEndpointHealthArgs{...} +type SqlEndpointHealthInput interface { + pulumi.Input + + ToSqlEndpointHealthOutput() SqlEndpointHealthOutput + ToSqlEndpointHealthOutputWithContext(context.Context) SqlEndpointHealthOutput +} + +type SqlEndpointHealthArgs struct { + Details pulumi.StringPtrInput `pulumi:"details"` + FailureReason SqlEndpointHealthFailureReasonPtrInput `pulumi:"failureReason"` + Message pulumi.StringPtrInput `pulumi:"message"` + Status pulumi.StringPtrInput `pulumi:"status"` + Summary pulumi.StringPtrInput `pulumi:"summary"` +} + +func (SqlEndpointHealthArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SqlEndpointHealth)(nil)).Elem() +} + +func (i SqlEndpointHealthArgs) ToSqlEndpointHealthOutput() SqlEndpointHealthOutput { + return i.ToSqlEndpointHealthOutputWithContext(context.Background()) +} + +func (i SqlEndpointHealthArgs) ToSqlEndpointHealthOutputWithContext(ctx context.Context) SqlEndpointHealthOutput { + return pulumi.ToOutputWithContext(ctx, i).(SqlEndpointHealthOutput) +} + +// SqlEndpointHealthArrayInput is an input type that accepts SqlEndpointHealthArray and SqlEndpointHealthArrayOutput values. 
+// You can construct a concrete instance of `SqlEndpointHealthArrayInput` via: +// +// SqlEndpointHealthArray{ SqlEndpointHealthArgs{...} } +type SqlEndpointHealthArrayInput interface { + pulumi.Input + + ToSqlEndpointHealthArrayOutput() SqlEndpointHealthArrayOutput + ToSqlEndpointHealthArrayOutputWithContext(context.Context) SqlEndpointHealthArrayOutput +} + +type SqlEndpointHealthArray []SqlEndpointHealthInput + +func (SqlEndpointHealthArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]SqlEndpointHealth)(nil)).Elem() +} + +func (i SqlEndpointHealthArray) ToSqlEndpointHealthArrayOutput() SqlEndpointHealthArrayOutput { + return i.ToSqlEndpointHealthArrayOutputWithContext(context.Background()) +} + +func (i SqlEndpointHealthArray) ToSqlEndpointHealthArrayOutputWithContext(ctx context.Context) SqlEndpointHealthArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(SqlEndpointHealthArrayOutput) +} + +type SqlEndpointHealthOutput struct{ *pulumi.OutputState } + +func (SqlEndpointHealthOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SqlEndpointHealth)(nil)).Elem() +} + +func (o SqlEndpointHealthOutput) ToSqlEndpointHealthOutput() SqlEndpointHealthOutput { + return o +} + +func (o SqlEndpointHealthOutput) ToSqlEndpointHealthOutputWithContext(ctx context.Context) SqlEndpointHealthOutput { + return o +} + +func (o SqlEndpointHealthOutput) Details() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointHealth) *string { return v.Details }).(pulumi.StringPtrOutput) +} + +func (o SqlEndpointHealthOutput) FailureReason() SqlEndpointHealthFailureReasonPtrOutput { + return o.ApplyT(func(v SqlEndpointHealth) *SqlEndpointHealthFailureReason { return v.FailureReason }).(SqlEndpointHealthFailureReasonPtrOutput) +} + +func (o SqlEndpointHealthOutput) Message() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointHealth) *string { return v.Message }).(pulumi.StringPtrOutput) +} + +func (o SqlEndpointHealthOutput) Status() 
pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointHealth) *string { return v.Status }).(pulumi.StringPtrOutput) +} + +func (o SqlEndpointHealthOutput) Summary() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointHealth) *string { return v.Summary }).(pulumi.StringPtrOutput) +} + +type SqlEndpointHealthArrayOutput struct{ *pulumi.OutputState } + +func (SqlEndpointHealthArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]SqlEndpointHealth)(nil)).Elem() +} + +func (o SqlEndpointHealthArrayOutput) ToSqlEndpointHealthArrayOutput() SqlEndpointHealthArrayOutput { + return o +} + +func (o SqlEndpointHealthArrayOutput) ToSqlEndpointHealthArrayOutputWithContext(ctx context.Context) SqlEndpointHealthArrayOutput { + return o +} + +func (o SqlEndpointHealthArrayOutput) Index(i pulumi.IntInput) SqlEndpointHealthOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) SqlEndpointHealth { + return vs[0].([]SqlEndpointHealth)[vs[1].(int)] + }).(SqlEndpointHealthOutput) +} + +type SqlEndpointHealthFailureReason struct { + Code *string `pulumi:"code"` + Parameters map[string]interface{} `pulumi:"parameters"` + Type *string `pulumi:"type"` +} + +// SqlEndpointHealthFailureReasonInput is an input type that accepts SqlEndpointHealthFailureReasonArgs and SqlEndpointHealthFailureReasonOutput values. 
+// You can construct a concrete instance of `SqlEndpointHealthFailureReasonInput` via: +// +// SqlEndpointHealthFailureReasonArgs{...} +type SqlEndpointHealthFailureReasonInput interface { + pulumi.Input + + ToSqlEndpointHealthFailureReasonOutput() SqlEndpointHealthFailureReasonOutput + ToSqlEndpointHealthFailureReasonOutputWithContext(context.Context) SqlEndpointHealthFailureReasonOutput +} + +type SqlEndpointHealthFailureReasonArgs struct { + Code pulumi.StringPtrInput `pulumi:"code"` + Parameters pulumi.MapInput `pulumi:"parameters"` + Type pulumi.StringPtrInput `pulumi:"type"` +} + +func (SqlEndpointHealthFailureReasonArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SqlEndpointHealthFailureReason)(nil)).Elem() +} + +func (i SqlEndpointHealthFailureReasonArgs) ToSqlEndpointHealthFailureReasonOutput() SqlEndpointHealthFailureReasonOutput { + return i.ToSqlEndpointHealthFailureReasonOutputWithContext(context.Background()) +} + +func (i SqlEndpointHealthFailureReasonArgs) ToSqlEndpointHealthFailureReasonOutputWithContext(ctx context.Context) SqlEndpointHealthFailureReasonOutput { + return pulumi.ToOutputWithContext(ctx, i).(SqlEndpointHealthFailureReasonOutput) +} + +func (i SqlEndpointHealthFailureReasonArgs) ToSqlEndpointHealthFailureReasonPtrOutput() SqlEndpointHealthFailureReasonPtrOutput { + return i.ToSqlEndpointHealthFailureReasonPtrOutputWithContext(context.Background()) +} + +func (i SqlEndpointHealthFailureReasonArgs) ToSqlEndpointHealthFailureReasonPtrOutputWithContext(ctx context.Context) SqlEndpointHealthFailureReasonPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SqlEndpointHealthFailureReasonOutput).ToSqlEndpointHealthFailureReasonPtrOutputWithContext(ctx) +} + +// SqlEndpointHealthFailureReasonPtrInput is an input type that accepts SqlEndpointHealthFailureReasonArgs, SqlEndpointHealthFailureReasonPtr and SqlEndpointHealthFailureReasonPtrOutput values. 
+// You can construct a concrete instance of `SqlEndpointHealthFailureReasonPtrInput` via: +// +// SqlEndpointHealthFailureReasonArgs{...} +// +// or: +// +// nil +type SqlEndpointHealthFailureReasonPtrInput interface { + pulumi.Input + + ToSqlEndpointHealthFailureReasonPtrOutput() SqlEndpointHealthFailureReasonPtrOutput + ToSqlEndpointHealthFailureReasonPtrOutputWithContext(context.Context) SqlEndpointHealthFailureReasonPtrOutput +} + +type sqlEndpointHealthFailureReasonPtrType SqlEndpointHealthFailureReasonArgs + +func SqlEndpointHealthFailureReasonPtr(v *SqlEndpointHealthFailureReasonArgs) SqlEndpointHealthFailureReasonPtrInput { + return (*sqlEndpointHealthFailureReasonPtrType)(v) +} + +func (*sqlEndpointHealthFailureReasonPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SqlEndpointHealthFailureReason)(nil)).Elem() +} + +func (i *sqlEndpointHealthFailureReasonPtrType) ToSqlEndpointHealthFailureReasonPtrOutput() SqlEndpointHealthFailureReasonPtrOutput { + return i.ToSqlEndpointHealthFailureReasonPtrOutputWithContext(context.Background()) +} + +func (i *sqlEndpointHealthFailureReasonPtrType) ToSqlEndpointHealthFailureReasonPtrOutputWithContext(ctx context.Context) SqlEndpointHealthFailureReasonPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SqlEndpointHealthFailureReasonPtrOutput) +} + +type SqlEndpointHealthFailureReasonOutput struct{ *pulumi.OutputState } + +func (SqlEndpointHealthFailureReasonOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SqlEndpointHealthFailureReason)(nil)).Elem() +} + +func (o SqlEndpointHealthFailureReasonOutput) ToSqlEndpointHealthFailureReasonOutput() SqlEndpointHealthFailureReasonOutput { + return o +} + +func (o SqlEndpointHealthFailureReasonOutput) ToSqlEndpointHealthFailureReasonOutputWithContext(ctx context.Context) SqlEndpointHealthFailureReasonOutput { + return o +} + +func (o SqlEndpointHealthFailureReasonOutput) ToSqlEndpointHealthFailureReasonPtrOutput() 
SqlEndpointHealthFailureReasonPtrOutput { + return o.ToSqlEndpointHealthFailureReasonPtrOutputWithContext(context.Background()) +} + +func (o SqlEndpointHealthFailureReasonOutput) ToSqlEndpointHealthFailureReasonPtrOutputWithContext(ctx context.Context) SqlEndpointHealthFailureReasonPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SqlEndpointHealthFailureReason) *SqlEndpointHealthFailureReason { + return &v + }).(SqlEndpointHealthFailureReasonPtrOutput) +} + +func (o SqlEndpointHealthFailureReasonOutput) Code() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointHealthFailureReason) *string { return v.Code }).(pulumi.StringPtrOutput) +} + +func (o SqlEndpointHealthFailureReasonOutput) Parameters() pulumi.MapOutput { + return o.ApplyT(func(v SqlEndpointHealthFailureReason) map[string]interface{} { return v.Parameters }).(pulumi.MapOutput) +} + +func (o SqlEndpointHealthFailureReasonOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointHealthFailureReason) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type SqlEndpointHealthFailureReasonPtrOutput struct{ *pulumi.OutputState } + +func (SqlEndpointHealthFailureReasonPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SqlEndpointHealthFailureReason)(nil)).Elem() +} + +func (o SqlEndpointHealthFailureReasonPtrOutput) ToSqlEndpointHealthFailureReasonPtrOutput() SqlEndpointHealthFailureReasonPtrOutput { + return o +} + +func (o SqlEndpointHealthFailureReasonPtrOutput) ToSqlEndpointHealthFailureReasonPtrOutputWithContext(ctx context.Context) SqlEndpointHealthFailureReasonPtrOutput { + return o +} + +func (o SqlEndpointHealthFailureReasonPtrOutput) Elem() SqlEndpointHealthFailureReasonOutput { + return o.ApplyT(func(v *SqlEndpointHealthFailureReason) SqlEndpointHealthFailureReason { + if v != nil { + return *v + } + var ret SqlEndpointHealthFailureReason + return ret + }).(SqlEndpointHealthFailureReasonOutput) +} + +func (o 
SqlEndpointHealthFailureReasonPtrOutput) Code() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SqlEndpointHealthFailureReason) *string { + if v == nil { + return nil + } + return v.Code + }).(pulumi.StringPtrOutput) +} + +func (o SqlEndpointHealthFailureReasonPtrOutput) Parameters() pulumi.MapOutput { + return o.ApplyT(func(v *SqlEndpointHealthFailureReason) map[string]interface{} { + if v == nil { + return nil + } + return v.Parameters + }).(pulumi.MapOutput) +} + +func (o SqlEndpointHealthFailureReasonPtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SqlEndpointHealthFailureReason) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + type SqlEndpointOdbcParams struct { - Host *string `pulumi:"host"` Hostname *string `pulumi:"hostname"` - Path string `pulumi:"path"` - Port int `pulumi:"port"` - Protocol string `pulumi:"protocol"` + Path *string `pulumi:"path"` + Port *int `pulumi:"port"` + Protocol *string `pulumi:"protocol"` } // SqlEndpointOdbcParamsInput is an input type that accepts SqlEndpointOdbcParamsArgs and SqlEndpointOdbcParamsOutput values. 
@@ -40499,11 +40794,10 @@ type SqlEndpointOdbcParamsInput interface { } type SqlEndpointOdbcParamsArgs struct { - Host pulumi.StringPtrInput `pulumi:"host"` Hostname pulumi.StringPtrInput `pulumi:"hostname"` - Path pulumi.StringInput `pulumi:"path"` - Port pulumi.IntInput `pulumi:"port"` - Protocol pulumi.StringInput `pulumi:"protocol"` + Path pulumi.StringPtrInput `pulumi:"path"` + Port pulumi.IntPtrInput `pulumi:"port"` + Protocol pulumi.StringPtrInput `pulumi:"protocol"` } func (SqlEndpointOdbcParamsArgs) ElementType() reflect.Type { @@ -40583,24 +40877,20 @@ func (o SqlEndpointOdbcParamsOutput) ToSqlEndpointOdbcParamsPtrOutputWithContext }).(SqlEndpointOdbcParamsPtrOutput) } -func (o SqlEndpointOdbcParamsOutput) Host() pulumi.StringPtrOutput { - return o.ApplyT(func(v SqlEndpointOdbcParams) *string { return v.Host }).(pulumi.StringPtrOutput) -} - func (o SqlEndpointOdbcParamsOutput) Hostname() pulumi.StringPtrOutput { return o.ApplyT(func(v SqlEndpointOdbcParams) *string { return v.Hostname }).(pulumi.StringPtrOutput) } -func (o SqlEndpointOdbcParamsOutput) Path() pulumi.StringOutput { - return o.ApplyT(func(v SqlEndpointOdbcParams) string { return v.Path }).(pulumi.StringOutput) +func (o SqlEndpointOdbcParamsOutput) Path() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointOdbcParams) *string { return v.Path }).(pulumi.StringPtrOutput) } -func (o SqlEndpointOdbcParamsOutput) Port() pulumi.IntOutput { - return o.ApplyT(func(v SqlEndpointOdbcParams) int { return v.Port }).(pulumi.IntOutput) +func (o SqlEndpointOdbcParamsOutput) Port() pulumi.IntPtrOutput { + return o.ApplyT(func(v SqlEndpointOdbcParams) *int { return v.Port }).(pulumi.IntPtrOutput) } -func (o SqlEndpointOdbcParamsOutput) Protocol() pulumi.StringOutput { - return o.ApplyT(func(v SqlEndpointOdbcParams) string { return v.Protocol }).(pulumi.StringOutput) +func (o SqlEndpointOdbcParamsOutput) Protocol() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlEndpointOdbcParams) *string { 
return v.Protocol }).(pulumi.StringPtrOutput) } type SqlEndpointOdbcParamsPtrOutput struct{ *pulumi.OutputState } @@ -40627,15 +40917,6 @@ func (o SqlEndpointOdbcParamsPtrOutput) Elem() SqlEndpointOdbcParamsOutput { }).(SqlEndpointOdbcParamsOutput) } -func (o SqlEndpointOdbcParamsPtrOutput) Host() pulumi.StringPtrOutput { - return o.ApplyT(func(v *SqlEndpointOdbcParams) *string { - if v == nil { - return nil - } - return v.Host - }).(pulumi.StringPtrOutput) -} - func (o SqlEndpointOdbcParamsPtrOutput) Hostname() pulumi.StringPtrOutput { return o.ApplyT(func(v *SqlEndpointOdbcParams) *string { if v == nil { @@ -40650,7 +40931,7 @@ func (o SqlEndpointOdbcParamsPtrOutput) Path() pulumi.StringPtrOutput { if v == nil { return nil } - return &v.Path + return v.Path }).(pulumi.StringPtrOutput) } @@ -40659,7 +40940,7 @@ func (o SqlEndpointOdbcParamsPtrOutput) Port() pulumi.IntPtrOutput { if v == nil { return nil } - return &v.Port + return v.Port }).(pulumi.IntPtrOutput) } @@ -40668,7 +40949,7 @@ func (o SqlEndpointOdbcParamsPtrOutput) Protocol() pulumi.StringPtrOutput { if v == nil { return nil } - return &v.Protocol + return v.Protocol }).(pulumi.StringPtrOutput) } @@ -50068,6 +50349,462 @@ func (o GetClusterClusterInfoTerminationReasonPtrOutput) Type() pulumi.StringPtr }).(pulumi.StringPtrOutput) } +type GetCurrentMetastoreMetastoreInfo struct { + Cloud *string `pulumi:"cloud"` + // Timestamp (in milliseconds) when the current metastore was created. + CreatedAt *int `pulumi:"createdAt"` + // the ID of the identity that created the current metastore. + CreatedBy *string `pulumi:"createdBy"` + // the ID of the default data access configuration. + DefaultDataAccessConfigId *string `pulumi:"defaultDataAccessConfigId"` + // The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. 
+ DeltaSharingOrganizationName *string `pulumi:"deltaSharingOrganizationName"` + // the expiration duration in seconds on recipient data access tokens. + DeltaSharingRecipientTokenLifetimeInSeconds *int `pulumi:"deltaSharingRecipientTokenLifetimeInSeconds"` + // Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + DeltaSharingScope *string `pulumi:"deltaSharingScope"` + // Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + GlobalMetastoreId *string `pulumi:"globalMetastoreId"` + // Metastore ID. + MetastoreId *string `pulumi:"metastoreId"` + // Name of metastore. + Name *string `pulumi:"name"` + // Username/group name/sp applicationId of the metastore owner. + Owner *string `pulumi:"owner"` + // the version of the privilege model used by the metastore. + PrivilegeModelVersion *string `pulumi:"privilegeModelVersion"` + // (Mandatory for account-level) The region of the metastore. + Region *string `pulumi:"region"` + // Path on cloud storage account, where managed `Table` are stored. + StorageRoot *string `pulumi:"storageRoot"` + // ID of a storage credential used for the `storageRoot`. + StorageRootCredentialId *string `pulumi:"storageRootCredentialId"` + // Name of a storage credential used for the `storageRoot`. + StorageRootCredentialName *string `pulumi:"storageRootCredentialName"` + // Timestamp (in milliseconds) when the current metastore was updated. + UpdatedAt *int `pulumi:"updatedAt"` + // the ID of the identity that updated the current metastore. + UpdatedBy *string `pulumi:"updatedBy"` +} + +// GetCurrentMetastoreMetastoreInfoInput is an input type that accepts GetCurrentMetastoreMetastoreInfoArgs and GetCurrentMetastoreMetastoreInfoOutput values. 
+// You can construct a concrete instance of `GetCurrentMetastoreMetastoreInfoInput` via: +// +// GetCurrentMetastoreMetastoreInfoArgs{...} +type GetCurrentMetastoreMetastoreInfoInput interface { + pulumi.Input + + ToGetCurrentMetastoreMetastoreInfoOutput() GetCurrentMetastoreMetastoreInfoOutput + ToGetCurrentMetastoreMetastoreInfoOutputWithContext(context.Context) GetCurrentMetastoreMetastoreInfoOutput +} + +type GetCurrentMetastoreMetastoreInfoArgs struct { + Cloud pulumi.StringPtrInput `pulumi:"cloud"` + // Timestamp (in milliseconds) when the current metastore was created. + CreatedAt pulumi.IntPtrInput `pulumi:"createdAt"` + // the ID of the identity that created the current metastore. + CreatedBy pulumi.StringPtrInput `pulumi:"createdBy"` + // the ID of the default data access configuration. + DefaultDataAccessConfigId pulumi.StringPtrInput `pulumi:"defaultDataAccessConfigId"` + // The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + DeltaSharingOrganizationName pulumi.StringPtrInput `pulumi:"deltaSharingOrganizationName"` + // the expiration duration in seconds on recipient data access tokens. + DeltaSharingRecipientTokenLifetimeInSeconds pulumi.IntPtrInput `pulumi:"deltaSharingRecipientTokenLifetimeInSeconds"` + // Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + DeltaSharingScope pulumi.StringPtrInput `pulumi:"deltaSharingScope"` + // Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + GlobalMetastoreId pulumi.StringPtrInput `pulumi:"globalMetastoreId"` + // Metastore ID. + MetastoreId pulumi.StringPtrInput `pulumi:"metastoreId"` + // Name of metastore. + Name pulumi.StringPtrInput `pulumi:"name"` + // Username/group name/sp applicationId of the metastore owner. + Owner pulumi.StringPtrInput `pulumi:"owner"` + // the version of the privilege model used by the metastore. 
+ PrivilegeModelVersion pulumi.StringPtrInput `pulumi:"privilegeModelVersion"` + // (Mandatory for account-level) The region of the metastore. + Region pulumi.StringPtrInput `pulumi:"region"` + // Path on cloud storage account, where managed `Table` are stored. + StorageRoot pulumi.StringPtrInput `pulumi:"storageRoot"` + // ID of a storage credential used for the `storageRoot`. + StorageRootCredentialId pulumi.StringPtrInput `pulumi:"storageRootCredentialId"` + // Name of a storage credential used for the `storageRoot`. + StorageRootCredentialName pulumi.StringPtrInput `pulumi:"storageRootCredentialName"` + // Timestamp (in milliseconds) when the current metastore was updated. + UpdatedAt pulumi.IntPtrInput `pulumi:"updatedAt"` + // the ID of the identity that updated the current metastore. + UpdatedBy pulumi.StringPtrInput `pulumi:"updatedBy"` +} + +func (GetCurrentMetastoreMetastoreInfoArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCurrentMetastoreMetastoreInfo)(nil)).Elem() +} + +func (i GetCurrentMetastoreMetastoreInfoArgs) ToGetCurrentMetastoreMetastoreInfoOutput() GetCurrentMetastoreMetastoreInfoOutput { + return i.ToGetCurrentMetastoreMetastoreInfoOutputWithContext(context.Background()) +} + +func (i GetCurrentMetastoreMetastoreInfoArgs) ToGetCurrentMetastoreMetastoreInfoOutputWithContext(ctx context.Context) GetCurrentMetastoreMetastoreInfoOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCurrentMetastoreMetastoreInfoOutput) +} + +func (i GetCurrentMetastoreMetastoreInfoArgs) ToGetCurrentMetastoreMetastoreInfoPtrOutput() GetCurrentMetastoreMetastoreInfoPtrOutput { + return i.ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(context.Background()) +} + +func (i GetCurrentMetastoreMetastoreInfoArgs) ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(ctx context.Context) GetCurrentMetastoreMetastoreInfoPtrOutput { + return pulumi.ToOutputWithContext(ctx, 
i).(GetCurrentMetastoreMetastoreInfoOutput).ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(ctx) +} + +// GetCurrentMetastoreMetastoreInfoPtrInput is an input type that accepts GetCurrentMetastoreMetastoreInfoArgs, GetCurrentMetastoreMetastoreInfoPtr and GetCurrentMetastoreMetastoreInfoPtrOutput values. +// You can construct a concrete instance of `GetCurrentMetastoreMetastoreInfoPtrInput` via: +// +// GetCurrentMetastoreMetastoreInfoArgs{...} +// +// or: +// +// nil +type GetCurrentMetastoreMetastoreInfoPtrInput interface { + pulumi.Input + + ToGetCurrentMetastoreMetastoreInfoPtrOutput() GetCurrentMetastoreMetastoreInfoPtrOutput + ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(context.Context) GetCurrentMetastoreMetastoreInfoPtrOutput +} + +type getCurrentMetastoreMetastoreInfoPtrType GetCurrentMetastoreMetastoreInfoArgs + +func GetCurrentMetastoreMetastoreInfoPtr(v *GetCurrentMetastoreMetastoreInfoArgs) GetCurrentMetastoreMetastoreInfoPtrInput { + return (*getCurrentMetastoreMetastoreInfoPtrType)(v) +} + +func (*getCurrentMetastoreMetastoreInfoPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetCurrentMetastoreMetastoreInfo)(nil)).Elem() +} + +func (i *getCurrentMetastoreMetastoreInfoPtrType) ToGetCurrentMetastoreMetastoreInfoPtrOutput() GetCurrentMetastoreMetastoreInfoPtrOutput { + return i.ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(context.Background()) +} + +func (i *getCurrentMetastoreMetastoreInfoPtrType) ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(ctx context.Context) GetCurrentMetastoreMetastoreInfoPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCurrentMetastoreMetastoreInfoPtrOutput) +} + +type GetCurrentMetastoreMetastoreInfoOutput struct{ *pulumi.OutputState } + +func (GetCurrentMetastoreMetastoreInfoOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCurrentMetastoreMetastoreInfo)(nil)).Elem() +} + +func (o GetCurrentMetastoreMetastoreInfoOutput) 
ToGetCurrentMetastoreMetastoreInfoOutput() GetCurrentMetastoreMetastoreInfoOutput { + return o +} + +func (o GetCurrentMetastoreMetastoreInfoOutput) ToGetCurrentMetastoreMetastoreInfoOutputWithContext(ctx context.Context) GetCurrentMetastoreMetastoreInfoOutput { + return o +} + +func (o GetCurrentMetastoreMetastoreInfoOutput) ToGetCurrentMetastoreMetastoreInfoPtrOutput() GetCurrentMetastoreMetastoreInfoPtrOutput { + return o.ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(context.Background()) +} + +func (o GetCurrentMetastoreMetastoreInfoOutput) ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(ctx context.Context) GetCurrentMetastoreMetastoreInfoPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetCurrentMetastoreMetastoreInfo) *GetCurrentMetastoreMetastoreInfo { + return &v + }).(GetCurrentMetastoreMetastoreInfoPtrOutput) +} + +func (o GetCurrentMetastoreMetastoreInfoOutput) Cloud() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.Cloud }).(pulumi.StringPtrOutput) +} + +// Timestamp (in milliseconds) when the current metastore was created. +func (o GetCurrentMetastoreMetastoreInfoOutput) CreatedAt() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *int { return v.CreatedAt }).(pulumi.IntPtrOutput) +} + +// the ID of the identity that created the current metastore. +func (o GetCurrentMetastoreMetastoreInfoOutput) CreatedBy() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.CreatedBy }).(pulumi.StringPtrOutput) +} + +// the ID of the default data access configuration. +func (o GetCurrentMetastoreMetastoreInfoOutput) DefaultDataAccessConfigId() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.DefaultDataAccessConfigId }).(pulumi.StringPtrOutput) +} + +// The organization name of a Delta Sharing entity. 
This field is used for Databricks to Databricks sharing. +func (o GetCurrentMetastoreMetastoreInfoOutput) DeltaSharingOrganizationName() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.DeltaSharingOrganizationName }).(pulumi.StringPtrOutput) +} + +// the expiration duration in seconds on recipient data access tokens. +func (o GetCurrentMetastoreMetastoreInfoOutput) DeltaSharingRecipientTokenLifetimeInSeconds() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *int { return v.DeltaSharingRecipientTokenLifetimeInSeconds }).(pulumi.IntPtrOutput) +} + +// Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. +func (o GetCurrentMetastoreMetastoreInfoOutput) DeltaSharingScope() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.DeltaSharingScope }).(pulumi.StringPtrOutput) +} + +// Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. +func (o GetCurrentMetastoreMetastoreInfoOutput) GlobalMetastoreId() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.GlobalMetastoreId }).(pulumi.StringPtrOutput) +} + +// Metastore ID. +func (o GetCurrentMetastoreMetastoreInfoOutput) MetastoreId() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.MetastoreId }).(pulumi.StringPtrOutput) +} + +// Name of metastore. +func (o GetCurrentMetastoreMetastoreInfoOutput) Name() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.Name }).(pulumi.StringPtrOutput) +} + +// Username/group name/sp applicationId of the metastore owner. 
+func (o GetCurrentMetastoreMetastoreInfoOutput) Owner() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.Owner }).(pulumi.StringPtrOutput) +} + +// the version of the privilege model used by the metastore. +func (o GetCurrentMetastoreMetastoreInfoOutput) PrivilegeModelVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.PrivilegeModelVersion }).(pulumi.StringPtrOutput) +} + +// (Mandatory for account-level) The region of the metastore. +func (o GetCurrentMetastoreMetastoreInfoOutput) Region() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.Region }).(pulumi.StringPtrOutput) +} + +// Path on cloud storage account, where managed `Table` are stored. +func (o GetCurrentMetastoreMetastoreInfoOutput) StorageRoot() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.StorageRoot }).(pulumi.StringPtrOutput) +} + +// ID of a storage credential used for the `storageRoot`. +func (o GetCurrentMetastoreMetastoreInfoOutput) StorageRootCredentialId() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.StorageRootCredentialId }).(pulumi.StringPtrOutput) +} + +// Name of a storage credential used for the `storageRoot`. +func (o GetCurrentMetastoreMetastoreInfoOutput) StorageRootCredentialName() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.StorageRootCredentialName }).(pulumi.StringPtrOutput) +} + +// Timestamp (in milliseconds) when the current metastore was updated. +func (o GetCurrentMetastoreMetastoreInfoOutput) UpdatedAt() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *int { return v.UpdatedAt }).(pulumi.IntPtrOutput) +} + +// the ID of the identity that updated the current metastore. 
+func (o GetCurrentMetastoreMetastoreInfoOutput) UpdatedBy() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCurrentMetastoreMetastoreInfo) *string { return v.UpdatedBy }).(pulumi.StringPtrOutput) +} + +type GetCurrentMetastoreMetastoreInfoPtrOutput struct{ *pulumi.OutputState } + +func (GetCurrentMetastoreMetastoreInfoPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetCurrentMetastoreMetastoreInfo)(nil)).Elem() +} + +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) ToGetCurrentMetastoreMetastoreInfoPtrOutput() GetCurrentMetastoreMetastoreInfoPtrOutput { + return o +} + +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) ToGetCurrentMetastoreMetastoreInfoPtrOutputWithContext(ctx context.Context) GetCurrentMetastoreMetastoreInfoPtrOutput { + return o +} + +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) Elem() GetCurrentMetastoreMetastoreInfoOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) GetCurrentMetastoreMetastoreInfo { + if v != nil { + return *v + } + var ret GetCurrentMetastoreMetastoreInfo + return ret + }).(GetCurrentMetastoreMetastoreInfoOutput) +} + +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) Cloud() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.Cloud + }).(pulumi.StringPtrOutput) +} + +// Timestamp (in milliseconds) when the current metastore was created. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) CreatedAt() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *int { + if v == nil { + return nil + } + return v.CreatedAt + }).(pulumi.IntPtrOutput) +} + +// the ID of the identity that created the current metastore. 
+func (o GetCurrentMetastoreMetastoreInfoPtrOutput) CreatedBy() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.CreatedBy + }).(pulumi.StringPtrOutput) +} + +// the ID of the default data access configuration. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) DefaultDataAccessConfigId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.DefaultDataAccessConfigId + }).(pulumi.StringPtrOutput) +} + +// The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) DeltaSharingOrganizationName() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.DeltaSharingOrganizationName + }).(pulumi.StringPtrOutput) +} + +// the expiration duration in seconds on recipient data access tokens. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) DeltaSharingRecipientTokenLifetimeInSeconds() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *int { + if v == nil { + return nil + } + return v.DeltaSharingRecipientTokenLifetimeInSeconds + }).(pulumi.IntPtrOutput) +} + +// Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) DeltaSharingScope() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.DeltaSharingScope + }).(pulumi.StringPtrOutput) +} + +// Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. 
+func (o GetCurrentMetastoreMetastoreInfoPtrOutput) GlobalMetastoreId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.GlobalMetastoreId + }).(pulumi.StringPtrOutput) +} + +// Metastore ID. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) MetastoreId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.MetastoreId + }).(pulumi.StringPtrOutput) +} + +// Name of metastore. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) Name() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.Name + }).(pulumi.StringPtrOutput) +} + +// Username/group name/sp applicationId of the metastore owner. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) Owner() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.Owner + }).(pulumi.StringPtrOutput) +} + +// the version of the privilege model used by the metastore. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) PrivilegeModelVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.PrivilegeModelVersion + }).(pulumi.StringPtrOutput) +} + +// (Mandatory for account-level) The region of the metastore. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) Region() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.Region + }).(pulumi.StringPtrOutput) +} + +// Path on cloud storage account, where managed `Table` are stored. 
+func (o GetCurrentMetastoreMetastoreInfoPtrOutput) StorageRoot() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.StorageRoot + }).(pulumi.StringPtrOutput) +} + +// ID of a storage credential used for the `storageRoot`. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) StorageRootCredentialId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.StorageRootCredentialId + }).(pulumi.StringPtrOutput) +} + +// Name of a storage credential used for the `storageRoot`. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) StorageRootCredentialName() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.StorageRootCredentialName + }).(pulumi.StringPtrOutput) +} + +// Timestamp (in milliseconds) when the current metastore was updated. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) UpdatedAt() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *int { + if v == nil { + return nil + } + return v.UpdatedAt + }).(pulumi.IntPtrOutput) +} + +// the ID of the identity that updated the current metastore. +func (o GetCurrentMetastoreMetastoreInfoPtrOutput) UpdatedBy() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetCurrentMetastoreMetastoreInfo) *string { + if v == nil { + return nil + } + return v.UpdatedBy + }).(pulumi.StringPtrOutput) +} + type GetDbfsFilePathsPathList struct { FileSize *int `pulumi:"fileSize"` // Path on DBFS for the file to perform listing @@ -74089,7 +74826,7 @@ type GetMetastoreMetastoreInfo struct { Owner *string `pulumi:"owner"` PrivilegeModelVersion *string `pulumi:"privilegeModelVersion"` Region *string `pulumi:"region"` - // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. 
+ // Path on cloud storage account, where managed `Table` are stored. StorageRoot *string `pulumi:"storageRoot"` StorageRootCredentialId *string `pulumi:"storageRootCredentialId"` StorageRootCredentialName *string `pulumi:"storageRootCredentialName"` @@ -74128,7 +74865,7 @@ type GetMetastoreMetastoreInfoArgs struct { Owner pulumi.StringPtrInput `pulumi:"owner"` PrivilegeModelVersion pulumi.StringPtrInput `pulumi:"privilegeModelVersion"` Region pulumi.StringPtrInput `pulumi:"region"` - // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. + // Path on cloud storage account, where managed `Table` are stored. StorageRoot pulumi.StringPtrInput `pulumi:"storageRoot"` StorageRootCredentialId pulumi.StringPtrInput `pulumi:"storageRootCredentialId"` StorageRootCredentialName pulumi.StringPtrInput `pulumi:"storageRootCredentialName"` @@ -74271,7 +75008,7 @@ func (o GetMetastoreMetastoreInfoOutput) Region() pulumi.StringPtrOutput { return o.ApplyT(func(v GetMetastoreMetastoreInfo) *string { return v.Region }).(pulumi.StringPtrOutput) } -// Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. +// Path on cloud storage account, where managed `Table` are stored. func (o GetMetastoreMetastoreInfoOutput) StorageRoot() pulumi.StringPtrOutput { return o.ApplyT(func(v GetMetastoreMetastoreInfo) *string { return v.StorageRoot }).(pulumi.StringPtrOutput) } @@ -74439,7 +75176,7 @@ func (o GetMetastoreMetastoreInfoPtrOutput) Region() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } -// Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. +// Path on cloud storage account, where managed `Table` are stored. 
func (o GetMetastoreMetastoreInfoPtrOutput) StorageRoot() pulumi.StringPtrOutput { return o.ApplyT(func(v *GetMetastoreMetastoreInfo) *string { if v == nil { @@ -75339,6 +76076,7 @@ func (o GetShareObjectPartitionValueArrayOutput) Index(i pulumi.IntInput) GetSha } type GetSqlWarehouseChannel struct { + DbsqlVersion *string `pulumi:"dbsqlVersion"` // Name of the SQL warehouse to search (case-sensitive). Name *string `pulumi:"name"` } @@ -75355,6 +76093,7 @@ type GetSqlWarehouseChannelInput interface { } type GetSqlWarehouseChannelArgs struct { + DbsqlVersion pulumi.StringPtrInput `pulumi:"dbsqlVersion"` // Name of the SQL warehouse to search (case-sensitive). Name pulumi.StringPtrInput `pulumi:"name"` } @@ -75436,6 +76175,10 @@ func (o GetSqlWarehouseChannelOutput) ToGetSqlWarehouseChannelPtrOutputWithConte }).(GetSqlWarehouseChannelPtrOutput) } +func (o GetSqlWarehouseChannelOutput) DbsqlVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseChannel) *string { return v.DbsqlVersion }).(pulumi.StringPtrOutput) +} + // Name of the SQL warehouse to search (case-sensitive). func (o GetSqlWarehouseChannelOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v GetSqlWarehouseChannel) *string { return v.Name }).(pulumi.StringPtrOutput) @@ -75465,6 +76208,15 @@ func (o GetSqlWarehouseChannelPtrOutput) Elem() GetSqlWarehouseChannelOutput { }).(GetSqlWarehouseChannelOutput) } +func (o GetSqlWarehouseChannelPtrOutput) DbsqlVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseChannel) *string { + if v == nil { + return nil + } + return v.DbsqlVersion + }).(pulumi.StringPtrOutput) +} + // Name of the SQL warehouse to search (case-sensitive). 
func (o GetSqlWarehouseChannelPtrOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v *GetSqlWarehouseChannel) *string { @@ -75475,12 +76227,367 @@ func (o GetSqlWarehouseChannelPtrOutput) Name() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +type GetSqlWarehouseHealth struct { + Details *string `pulumi:"details"` + FailureReason *GetSqlWarehouseHealthFailureReason `pulumi:"failureReason"` + Message *string `pulumi:"message"` + Status *string `pulumi:"status"` + Summary *string `pulumi:"summary"` +} + +// GetSqlWarehouseHealthInput is an input type that accepts GetSqlWarehouseHealthArgs and GetSqlWarehouseHealthOutput values. +// You can construct a concrete instance of `GetSqlWarehouseHealthInput` via: +// +// GetSqlWarehouseHealthArgs{...} +type GetSqlWarehouseHealthInput interface { + pulumi.Input + + ToGetSqlWarehouseHealthOutput() GetSqlWarehouseHealthOutput + ToGetSqlWarehouseHealthOutputWithContext(context.Context) GetSqlWarehouseHealthOutput +} + +type GetSqlWarehouseHealthArgs struct { + Details pulumi.StringPtrInput `pulumi:"details"` + FailureReason GetSqlWarehouseHealthFailureReasonPtrInput `pulumi:"failureReason"` + Message pulumi.StringPtrInput `pulumi:"message"` + Status pulumi.StringPtrInput `pulumi:"status"` + Summary pulumi.StringPtrInput `pulumi:"summary"` +} + +func (GetSqlWarehouseHealthArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetSqlWarehouseHealth)(nil)).Elem() +} + +func (i GetSqlWarehouseHealthArgs) ToGetSqlWarehouseHealthOutput() GetSqlWarehouseHealthOutput { + return i.ToGetSqlWarehouseHealthOutputWithContext(context.Background()) +} + +func (i GetSqlWarehouseHealthArgs) ToGetSqlWarehouseHealthOutputWithContext(ctx context.Context) GetSqlWarehouseHealthOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetSqlWarehouseHealthOutput) +} + +func (i GetSqlWarehouseHealthArgs) ToGetSqlWarehouseHealthPtrOutput() GetSqlWarehouseHealthPtrOutput { + return 
i.ToGetSqlWarehouseHealthPtrOutputWithContext(context.Background()) +} + +func (i GetSqlWarehouseHealthArgs) ToGetSqlWarehouseHealthPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetSqlWarehouseHealthOutput).ToGetSqlWarehouseHealthPtrOutputWithContext(ctx) +} + +// GetSqlWarehouseHealthPtrInput is an input type that accepts GetSqlWarehouseHealthArgs, GetSqlWarehouseHealthPtr and GetSqlWarehouseHealthPtrOutput values. +// You can construct a concrete instance of `GetSqlWarehouseHealthPtrInput` via: +// +// GetSqlWarehouseHealthArgs{...} +// +// or: +// +// nil +type GetSqlWarehouseHealthPtrInput interface { + pulumi.Input + + ToGetSqlWarehouseHealthPtrOutput() GetSqlWarehouseHealthPtrOutput + ToGetSqlWarehouseHealthPtrOutputWithContext(context.Context) GetSqlWarehouseHealthPtrOutput +} + +type getSqlWarehouseHealthPtrType GetSqlWarehouseHealthArgs + +func GetSqlWarehouseHealthPtr(v *GetSqlWarehouseHealthArgs) GetSqlWarehouseHealthPtrInput { + return (*getSqlWarehouseHealthPtrType)(v) +} + +func (*getSqlWarehouseHealthPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetSqlWarehouseHealth)(nil)).Elem() +} + +func (i *getSqlWarehouseHealthPtrType) ToGetSqlWarehouseHealthPtrOutput() GetSqlWarehouseHealthPtrOutput { + return i.ToGetSqlWarehouseHealthPtrOutputWithContext(context.Background()) +} + +func (i *getSqlWarehouseHealthPtrType) ToGetSqlWarehouseHealthPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetSqlWarehouseHealthPtrOutput) +} + +type GetSqlWarehouseHealthOutput struct{ *pulumi.OutputState } + +func (GetSqlWarehouseHealthOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetSqlWarehouseHealth)(nil)).Elem() +} + +func (o GetSqlWarehouseHealthOutput) ToGetSqlWarehouseHealthOutput() GetSqlWarehouseHealthOutput { + return o +} + +func (o GetSqlWarehouseHealthOutput) 
ToGetSqlWarehouseHealthOutputWithContext(ctx context.Context) GetSqlWarehouseHealthOutput { + return o +} + +func (o GetSqlWarehouseHealthOutput) ToGetSqlWarehouseHealthPtrOutput() GetSqlWarehouseHealthPtrOutput { + return o.ToGetSqlWarehouseHealthPtrOutputWithContext(context.Background()) +} + +func (o GetSqlWarehouseHealthOutput) ToGetSqlWarehouseHealthPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetSqlWarehouseHealth) *GetSqlWarehouseHealth { + return &v + }).(GetSqlWarehouseHealthPtrOutput) +} + +func (o GetSqlWarehouseHealthOutput) Details() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealth) *string { return v.Details }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthOutput) FailureReason() GetSqlWarehouseHealthFailureReasonPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealth) *GetSqlWarehouseHealthFailureReason { return v.FailureReason }).(GetSqlWarehouseHealthFailureReasonPtrOutput) +} + +func (o GetSqlWarehouseHealthOutput) Message() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealth) *string { return v.Message }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthOutput) Status() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealth) *string { return v.Status }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthOutput) Summary() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealth) *string { return v.Summary }).(pulumi.StringPtrOutput) +} + +type GetSqlWarehouseHealthPtrOutput struct{ *pulumi.OutputState } + +func (GetSqlWarehouseHealthPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetSqlWarehouseHealth)(nil)).Elem() +} + +func (o GetSqlWarehouseHealthPtrOutput) ToGetSqlWarehouseHealthPtrOutput() GetSqlWarehouseHealthPtrOutput { + return o +} + +func (o GetSqlWarehouseHealthPtrOutput) ToGetSqlWarehouseHealthPtrOutputWithContext(ctx 
context.Context) GetSqlWarehouseHealthPtrOutput { + return o +} + +func (o GetSqlWarehouseHealthPtrOutput) Elem() GetSqlWarehouseHealthOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealth) GetSqlWarehouseHealth { + if v != nil { + return *v + } + var ret GetSqlWarehouseHealth + return ret + }).(GetSqlWarehouseHealthOutput) +} + +func (o GetSqlWarehouseHealthPtrOutput) Details() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealth) *string { + if v == nil { + return nil + } + return v.Details + }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthPtrOutput) FailureReason() GetSqlWarehouseHealthFailureReasonPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealth) *GetSqlWarehouseHealthFailureReason { + if v == nil { + return nil + } + return v.FailureReason + }).(GetSqlWarehouseHealthFailureReasonPtrOutput) +} + +func (o GetSqlWarehouseHealthPtrOutput) Message() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealth) *string { + if v == nil { + return nil + } + return v.Message + }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthPtrOutput) Status() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealth) *string { + if v == nil { + return nil + } + return v.Status + }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthPtrOutput) Summary() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealth) *string { + if v == nil { + return nil + } + return v.Summary + }).(pulumi.StringPtrOutput) +} + +type GetSqlWarehouseHealthFailureReason struct { + Code *string `pulumi:"code"` + Parameters map[string]interface{} `pulumi:"parameters"` + Type *string `pulumi:"type"` +} + +// GetSqlWarehouseHealthFailureReasonInput is an input type that accepts GetSqlWarehouseHealthFailureReasonArgs and GetSqlWarehouseHealthFailureReasonOutput values. 
+// You can construct a concrete instance of `GetSqlWarehouseHealthFailureReasonInput` via: +// +// GetSqlWarehouseHealthFailureReasonArgs{...} +type GetSqlWarehouseHealthFailureReasonInput interface { + pulumi.Input + + ToGetSqlWarehouseHealthFailureReasonOutput() GetSqlWarehouseHealthFailureReasonOutput + ToGetSqlWarehouseHealthFailureReasonOutputWithContext(context.Context) GetSqlWarehouseHealthFailureReasonOutput +} + +type GetSqlWarehouseHealthFailureReasonArgs struct { + Code pulumi.StringPtrInput `pulumi:"code"` + Parameters pulumi.MapInput `pulumi:"parameters"` + Type pulumi.StringPtrInput `pulumi:"type"` +} + +func (GetSqlWarehouseHealthFailureReasonArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetSqlWarehouseHealthFailureReason)(nil)).Elem() +} + +func (i GetSqlWarehouseHealthFailureReasonArgs) ToGetSqlWarehouseHealthFailureReasonOutput() GetSqlWarehouseHealthFailureReasonOutput { + return i.ToGetSqlWarehouseHealthFailureReasonOutputWithContext(context.Background()) +} + +func (i GetSqlWarehouseHealthFailureReasonArgs) ToGetSqlWarehouseHealthFailureReasonOutputWithContext(ctx context.Context) GetSqlWarehouseHealthFailureReasonOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetSqlWarehouseHealthFailureReasonOutput) +} + +func (i GetSqlWarehouseHealthFailureReasonArgs) ToGetSqlWarehouseHealthFailureReasonPtrOutput() GetSqlWarehouseHealthFailureReasonPtrOutput { + return i.ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(context.Background()) +} + +func (i GetSqlWarehouseHealthFailureReasonArgs) ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthFailureReasonPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetSqlWarehouseHealthFailureReasonOutput).ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(ctx) +} + +// GetSqlWarehouseHealthFailureReasonPtrInput is an input type that accepts GetSqlWarehouseHealthFailureReasonArgs, GetSqlWarehouseHealthFailureReasonPtr and 
GetSqlWarehouseHealthFailureReasonPtrOutput values. +// You can construct a concrete instance of `GetSqlWarehouseHealthFailureReasonPtrInput` via: +// +// GetSqlWarehouseHealthFailureReasonArgs{...} +// +// or: +// +// nil +type GetSqlWarehouseHealthFailureReasonPtrInput interface { + pulumi.Input + + ToGetSqlWarehouseHealthFailureReasonPtrOutput() GetSqlWarehouseHealthFailureReasonPtrOutput + ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(context.Context) GetSqlWarehouseHealthFailureReasonPtrOutput +} + +type getSqlWarehouseHealthFailureReasonPtrType GetSqlWarehouseHealthFailureReasonArgs + +func GetSqlWarehouseHealthFailureReasonPtr(v *GetSqlWarehouseHealthFailureReasonArgs) GetSqlWarehouseHealthFailureReasonPtrInput { + return (*getSqlWarehouseHealthFailureReasonPtrType)(v) +} + +func (*getSqlWarehouseHealthFailureReasonPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetSqlWarehouseHealthFailureReason)(nil)).Elem() +} + +func (i *getSqlWarehouseHealthFailureReasonPtrType) ToGetSqlWarehouseHealthFailureReasonPtrOutput() GetSqlWarehouseHealthFailureReasonPtrOutput { + return i.ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(context.Background()) +} + +func (i *getSqlWarehouseHealthFailureReasonPtrType) ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthFailureReasonPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetSqlWarehouseHealthFailureReasonPtrOutput) +} + +type GetSqlWarehouseHealthFailureReasonOutput struct{ *pulumi.OutputState } + +func (GetSqlWarehouseHealthFailureReasonOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetSqlWarehouseHealthFailureReason)(nil)).Elem() +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) ToGetSqlWarehouseHealthFailureReasonOutput() GetSqlWarehouseHealthFailureReasonOutput { + return o +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) ToGetSqlWarehouseHealthFailureReasonOutputWithContext(ctx context.Context) 
GetSqlWarehouseHealthFailureReasonOutput { + return o +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) ToGetSqlWarehouseHealthFailureReasonPtrOutput() GetSqlWarehouseHealthFailureReasonPtrOutput { + return o.ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(context.Background()) +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthFailureReasonPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetSqlWarehouseHealthFailureReason) *GetSqlWarehouseHealthFailureReason { + return &v + }).(GetSqlWarehouseHealthFailureReasonPtrOutput) +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) Code() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealthFailureReason) *string { return v.Code }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) Parameters() pulumi.MapOutput { + return o.ApplyT(func(v GetSqlWarehouseHealthFailureReason) map[string]interface{} { return v.Parameters }).(pulumi.MapOutput) +} + +func (o GetSqlWarehouseHealthFailureReasonOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseHealthFailureReason) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type GetSqlWarehouseHealthFailureReasonPtrOutput struct{ *pulumi.OutputState } + +func (GetSqlWarehouseHealthFailureReasonPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetSqlWarehouseHealthFailureReason)(nil)).Elem() +} + +func (o GetSqlWarehouseHealthFailureReasonPtrOutput) ToGetSqlWarehouseHealthFailureReasonPtrOutput() GetSqlWarehouseHealthFailureReasonPtrOutput { + return o +} + +func (o GetSqlWarehouseHealthFailureReasonPtrOutput) ToGetSqlWarehouseHealthFailureReasonPtrOutputWithContext(ctx context.Context) GetSqlWarehouseHealthFailureReasonPtrOutput { + return o +} + +func (o GetSqlWarehouseHealthFailureReasonPtrOutput) Elem() GetSqlWarehouseHealthFailureReasonOutput { 
+ return o.ApplyT(func(v *GetSqlWarehouseHealthFailureReason) GetSqlWarehouseHealthFailureReason { + if v != nil { + return *v + } + var ret GetSqlWarehouseHealthFailureReason + return ret + }).(GetSqlWarehouseHealthFailureReasonOutput) +} + +func (o GetSqlWarehouseHealthFailureReasonPtrOutput) Code() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealthFailureReason) *string { + if v == nil { + return nil + } + return v.Code + }).(pulumi.StringPtrOutput) +} + +func (o GetSqlWarehouseHealthFailureReasonPtrOutput) Parameters() pulumi.MapOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealthFailureReason) map[string]interface{} { + if v == nil { + return nil + } + return v.Parameters + }).(pulumi.MapOutput) +} + +func (o GetSqlWarehouseHealthFailureReasonPtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetSqlWarehouseHealthFailureReason) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + type GetSqlWarehouseOdbcParams struct { - Host *string `pulumi:"host"` Hostname *string `pulumi:"hostname"` - Path string `pulumi:"path"` - Port int `pulumi:"port"` - Protocol string `pulumi:"protocol"` + Path *string `pulumi:"path"` + Port *int `pulumi:"port"` + Protocol *string `pulumi:"protocol"` } // GetSqlWarehouseOdbcParamsInput is an input type that accepts GetSqlWarehouseOdbcParamsArgs and GetSqlWarehouseOdbcParamsOutput values. 
@@ -75495,11 +76602,10 @@ type GetSqlWarehouseOdbcParamsInput interface { } type GetSqlWarehouseOdbcParamsArgs struct { - Host pulumi.StringPtrInput `pulumi:"host"` Hostname pulumi.StringPtrInput `pulumi:"hostname"` - Path pulumi.StringInput `pulumi:"path"` - Port pulumi.IntInput `pulumi:"port"` - Protocol pulumi.StringInput `pulumi:"protocol"` + Path pulumi.StringPtrInput `pulumi:"path"` + Port pulumi.IntPtrInput `pulumi:"port"` + Protocol pulumi.StringPtrInput `pulumi:"protocol"` } func (GetSqlWarehouseOdbcParamsArgs) ElementType() reflect.Type { @@ -75579,24 +76685,20 @@ func (o GetSqlWarehouseOdbcParamsOutput) ToGetSqlWarehouseOdbcParamsPtrOutputWit }).(GetSqlWarehouseOdbcParamsPtrOutput) } -func (o GetSqlWarehouseOdbcParamsOutput) Host() pulumi.StringPtrOutput { - return o.ApplyT(func(v GetSqlWarehouseOdbcParams) *string { return v.Host }).(pulumi.StringPtrOutput) -} - func (o GetSqlWarehouseOdbcParamsOutput) Hostname() pulumi.StringPtrOutput { return o.ApplyT(func(v GetSqlWarehouseOdbcParams) *string { return v.Hostname }).(pulumi.StringPtrOutput) } -func (o GetSqlWarehouseOdbcParamsOutput) Path() pulumi.StringOutput { - return o.ApplyT(func(v GetSqlWarehouseOdbcParams) string { return v.Path }).(pulumi.StringOutput) +func (o GetSqlWarehouseOdbcParamsOutput) Path() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseOdbcParams) *string { return v.Path }).(pulumi.StringPtrOutput) } -func (o GetSqlWarehouseOdbcParamsOutput) Port() pulumi.IntOutput { - return o.ApplyT(func(v GetSqlWarehouseOdbcParams) int { return v.Port }).(pulumi.IntOutput) +func (o GetSqlWarehouseOdbcParamsOutput) Port() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseOdbcParams) *int { return v.Port }).(pulumi.IntPtrOutput) } -func (o GetSqlWarehouseOdbcParamsOutput) Protocol() pulumi.StringOutput { - return o.ApplyT(func(v GetSqlWarehouseOdbcParams) string { return v.Protocol }).(pulumi.StringOutput) +func (o GetSqlWarehouseOdbcParamsOutput) Protocol() 
pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseOdbcParams) *string { return v.Protocol }).(pulumi.StringPtrOutput) } type GetSqlWarehouseOdbcParamsPtrOutput struct{ *pulumi.OutputState } @@ -75623,15 +76725,6 @@ func (o GetSqlWarehouseOdbcParamsPtrOutput) Elem() GetSqlWarehouseOdbcParamsOutp }).(GetSqlWarehouseOdbcParamsOutput) } -func (o GetSqlWarehouseOdbcParamsPtrOutput) Host() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetSqlWarehouseOdbcParams) *string { - if v == nil { - return nil - } - return v.Host - }).(pulumi.StringPtrOutput) -} - func (o GetSqlWarehouseOdbcParamsPtrOutput) Hostname() pulumi.StringPtrOutput { return o.ApplyT(func(v *GetSqlWarehouseOdbcParams) *string { if v == nil { @@ -75646,7 +76739,7 @@ func (o GetSqlWarehouseOdbcParamsPtrOutput) Path() pulumi.StringPtrOutput { if v == nil { return nil } - return &v.Path + return v.Path }).(pulumi.StringPtrOutput) } @@ -75655,7 +76748,7 @@ func (o GetSqlWarehouseOdbcParamsPtrOutput) Port() pulumi.IntPtrOutput { if v == nil { return nil } - return &v.Port + return v.Port }).(pulumi.IntPtrOutput) } @@ -75664,7 +76757,7 @@ func (o GetSqlWarehouseOdbcParamsPtrOutput) Protocol() pulumi.StringPtrOutput { if v == nil { return nil } - return &v.Protocol + return v.Protocol }).(pulumi.StringPtrOutput) } @@ -75802,8 +76895,8 @@ func (o GetSqlWarehouseTagsPtrOutput) CustomTags() GetSqlWarehouseTagsCustomTagA } type GetSqlWarehouseTagsCustomTag struct { - Key string `pulumi:"key"` - Value string `pulumi:"value"` + Key *string `pulumi:"key"` + Value *string `pulumi:"value"` } // GetSqlWarehouseTagsCustomTagInput is an input type that accepts GetSqlWarehouseTagsCustomTagArgs and GetSqlWarehouseTagsCustomTagOutput values. 
@@ -75818,8 +76911,8 @@ type GetSqlWarehouseTagsCustomTagInput interface { } type GetSqlWarehouseTagsCustomTagArgs struct { - Key pulumi.StringInput `pulumi:"key"` - Value pulumi.StringInput `pulumi:"value"` + Key pulumi.StringPtrInput `pulumi:"key"` + Value pulumi.StringPtrInput `pulumi:"value"` } func (GetSqlWarehouseTagsCustomTagArgs) ElementType() reflect.Type { @@ -75873,12 +76966,12 @@ func (o GetSqlWarehouseTagsCustomTagOutput) ToGetSqlWarehouseTagsCustomTagOutput return o } -func (o GetSqlWarehouseTagsCustomTagOutput) Key() pulumi.StringOutput { - return o.ApplyT(func(v GetSqlWarehouseTagsCustomTag) string { return v.Key }).(pulumi.StringOutput) +func (o GetSqlWarehouseTagsCustomTagOutput) Key() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseTagsCustomTag) *string { return v.Key }).(pulumi.StringPtrOutput) } -func (o GetSqlWarehouseTagsCustomTagOutput) Value() pulumi.StringOutput { - return o.ApplyT(func(v GetSqlWarehouseTagsCustomTag) string { return v.Value }).(pulumi.StringOutput) +func (o GetSqlWarehouseTagsCustomTagOutput) Value() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetSqlWarehouseTagsCustomTag) *string { return v.Value }).(pulumi.StringPtrOutput) } type GetSqlWarehouseTagsCustomTagArrayOutput struct{ *pulumi.OutputState } @@ -76386,6 +77479,10 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*SqlAlertOptionsPtrInput)(nil)).Elem(), SqlAlertOptionsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointChannelInput)(nil)).Elem(), SqlEndpointChannelArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointChannelPtrInput)(nil)).Elem(), SqlEndpointChannelArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointHealthInput)(nil)).Elem(), SqlEndpointHealthArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointHealthArrayInput)(nil)).Elem(), SqlEndpointHealthArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointHealthFailureReasonInput)(nil)).Elem(), 
SqlEndpointHealthFailureReasonArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointHealthFailureReasonPtrInput)(nil)).Elem(), SqlEndpointHealthFailureReasonArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointOdbcParamsInput)(nil)).Elem(), SqlEndpointOdbcParamsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointOdbcParamsPtrInput)(nil)).Elem(), SqlEndpointOdbcParamsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SqlEndpointTagsInput)(nil)).Elem(), SqlEndpointTagsArgs{}) @@ -76500,6 +77597,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetClusterClusterInfoInitScriptWorkspacePtrInput)(nil)).Elem(), GetClusterClusterInfoInitScriptWorkspaceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterClusterInfoTerminationReasonInput)(nil)).Elem(), GetClusterClusterInfoTerminationReasonArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterClusterInfoTerminationReasonPtrInput)(nil)).Elem(), GetClusterClusterInfoTerminationReasonArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCurrentMetastoreMetastoreInfoInput)(nil)).Elem(), GetCurrentMetastoreMetastoreInfoArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCurrentMetastoreMetastoreInfoPtrInput)(nil)).Elem(), GetCurrentMetastoreMetastoreInfoArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetDbfsFilePathsPathListInput)(nil)).Elem(), GetDbfsFilePathsPathListArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetDbfsFilePathsPathListArrayInput)(nil)).Elem(), GetDbfsFilePathsPathListArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetInstancePoolPoolInfoInput)(nil)).Elem(), GetInstancePoolPoolInfoArgs{}) @@ -76811,6 +77910,10 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetShareObjectPartitionValueArrayInput)(nil)).Elem(), GetShareObjectPartitionValueArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseChannelInput)(nil)).Elem(), GetSqlWarehouseChannelArgs{}) 
pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseChannelPtrInput)(nil)).Elem(), GetSqlWarehouseChannelArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseHealthInput)(nil)).Elem(), GetSqlWarehouseHealthArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseHealthPtrInput)(nil)).Elem(), GetSqlWarehouseHealthArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseHealthFailureReasonInput)(nil)).Elem(), GetSqlWarehouseHealthFailureReasonArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseHealthFailureReasonPtrInput)(nil)).Elem(), GetSqlWarehouseHealthFailureReasonArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseOdbcParamsInput)(nil)).Elem(), GetSqlWarehouseOdbcParamsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseOdbcParamsPtrInput)(nil)).Elem(), GetSqlWarehouseOdbcParamsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetSqlWarehouseTagsInput)(nil)).Elem(), GetSqlWarehouseTagsArgs{}) @@ -77301,6 +78404,10 @@ func init() { pulumi.RegisterOutputType(SqlAlertOptionsPtrOutput{}) pulumi.RegisterOutputType(SqlEndpointChannelOutput{}) pulumi.RegisterOutputType(SqlEndpointChannelPtrOutput{}) + pulumi.RegisterOutputType(SqlEndpointHealthOutput{}) + pulumi.RegisterOutputType(SqlEndpointHealthArrayOutput{}) + pulumi.RegisterOutputType(SqlEndpointHealthFailureReasonOutput{}) + pulumi.RegisterOutputType(SqlEndpointHealthFailureReasonPtrOutput{}) pulumi.RegisterOutputType(SqlEndpointOdbcParamsOutput{}) pulumi.RegisterOutputType(SqlEndpointOdbcParamsPtrOutput{}) pulumi.RegisterOutputType(SqlEndpointTagsOutput{}) @@ -77415,6 +78522,8 @@ func init() { pulumi.RegisterOutputType(GetClusterClusterInfoInitScriptWorkspacePtrOutput{}) pulumi.RegisterOutputType(GetClusterClusterInfoTerminationReasonOutput{}) pulumi.RegisterOutputType(GetClusterClusterInfoTerminationReasonPtrOutput{}) + pulumi.RegisterOutputType(GetCurrentMetastoreMetastoreInfoOutput{}) + 
pulumi.RegisterOutputType(GetCurrentMetastoreMetastoreInfoPtrOutput{}) pulumi.RegisterOutputType(GetDbfsFilePathsPathListOutput{}) pulumi.RegisterOutputType(GetDbfsFilePathsPathListArrayOutput{}) pulumi.RegisterOutputType(GetInstancePoolPoolInfoOutput{}) @@ -77726,6 +78835,10 @@ func init() { pulumi.RegisterOutputType(GetShareObjectPartitionValueArrayOutput{}) pulumi.RegisterOutputType(GetSqlWarehouseChannelOutput{}) pulumi.RegisterOutputType(GetSqlWarehouseChannelPtrOutput{}) + pulumi.RegisterOutputType(GetSqlWarehouseHealthOutput{}) + pulumi.RegisterOutputType(GetSqlWarehouseHealthPtrOutput{}) + pulumi.RegisterOutputType(GetSqlWarehouseHealthFailureReasonOutput{}) + pulumi.RegisterOutputType(GetSqlWarehouseHealthFailureReasonPtrOutput{}) pulumi.RegisterOutputType(GetSqlWarehouseOdbcParamsOutput{}) pulumi.RegisterOutputType(GetSqlWarehouseOdbcParamsPtrOutput{}) pulumi.RegisterOutputType(GetSqlWarehouseTagsOutput{}) diff --git a/sdk/go/databricks/recipient.go b/sdk/go/databricks/recipient.go index 6950f3e7..40b31a8f 100644 --- a/sdk/go/databricks/recipient.go +++ b/sdk/go/databricks/recipient.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // Within a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to. // // A `Recipient` is contained within Metastore and can have permissions to `SELECT` from a list of shares. diff --git a/sdk/go/databricks/registeredModel.go b/sdk/go/databricks/registeredModel.go index 36c010a9..2a733295 100644 --- a/sdk/go/databricks/registeredModel.go +++ b/sdk/go/databricks/registeredModel.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. 
// // ## Example Usage diff --git a/sdk/go/databricks/repo.go b/sdk/go/databricks/repo.go index 9652e8b0..ffebe720 100644 --- a/sdk/go/databricks/repo.go +++ b/sdk/go/databricks/repo.go @@ -37,6 +37,8 @@ type Repo struct { Tag pulumi.StringPtrOutput `pulumi:"tag"` // The URL of the Git Repository to clone from. If the value changes, repo is re-created. Url pulumi.StringOutput `pulumi:"url"` + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath pulumi.StringOutput `pulumi:"workspacePath"` } // NewRepo registers a new resource with the given unique name, arguments, and options. @@ -85,6 +87,8 @@ type repoState struct { Tag *string `pulumi:"tag"` // The URL of the Git Repository to clone from. If the value changes, repo is re-created. Url *string `pulumi:"url"` + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath *string `pulumi:"workspacePath"` } type RepoState struct { @@ -101,6 +105,8 @@ type RepoState struct { Tag pulumi.StringPtrInput // The URL of the Git Repository to clone from. If the value changes, repo is re-created. 
Url pulumi.StringPtrInput + // path on Workspace File System (WSFS) in form of `/Workspace` + `path` + WorkspacePath pulumi.StringPtrInput } func (RepoState) ElementType() reflect.Type { @@ -261,6 +267,11 @@ func (o RepoOutput) Url() pulumi.StringOutput { return o.ApplyT(func(v *Repo) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput) } +// path on Workspace File System (WSFS) in form of `/Workspace` + `path` +func (o RepoOutput) WorkspacePath() pulumi.StringOutput { + return o.ApplyT(func(v *Repo) pulumi.StringOutput { return v.WorkspacePath }).(pulumi.StringOutput) +} + type RepoArrayOutput struct{ *pulumi.OutputState } func (RepoArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/schema.go b/sdk/go/databricks/schema.go index 396567f7..0f7e7d31 100644 --- a/sdk/go/databricks/schema.go +++ b/sdk/go/databricks/schema.go @@ -12,6 +12,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be only used with workspace-level provider! +// // Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. // // A `Schema` is contained within Catalog and can contain tables & views. diff --git a/sdk/go/databricks/sqlEndpoint.go b/sdk/go/databricks/sqlEndpoint.go index 0d09bb24..b18de05e 100644 --- a/sdk/go/databricks/sqlEndpoint.go +++ b/sdk/go/databricks/sqlEndpoint.go @@ -85,6 +85,8 @@ type SqlEndpoint struct { Channel SqlEndpointChannelPtrOutput `pulumi:"channel"` // The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". ClusterSize pulumi.StringOutput `pulumi:"clusterSize"` + // The username of the user who created the endpoint. + CreatorName pulumi.StringOutput `pulumi:"creatorName"` // ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. 
DataSourceId pulumi.StringOutput `pulumi:"dataSourceId"` // Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. @@ -94,8 +96,10 @@ type SqlEndpoint struct { // - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). // // - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). - EnableServerlessCompute pulumi.BoolPtrOutput `pulumi:"enableServerlessCompute"` - InstanceProfileArn pulumi.StringPtrOutput `pulumi:"instanceProfileArn"` + EnableServerlessCompute pulumi.BoolPtrOutput `pulumi:"enableServerlessCompute"` + // Health status of the endpoint. 
+ Healths SqlEndpointHealthArrayOutput `pulumi:"healths"` + InstanceProfileArn pulumi.StringPtrOutput `pulumi:"instanceProfileArn"` // JDBC connection string. JdbcUrl pulumi.StringOutput `pulumi:"jdbcUrl"` // Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. @@ -103,16 +107,20 @@ type SqlEndpoint struct { // Minimum number of clusters available when a SQL warehouse is running. The default is `1`. MinNumClusters pulumi.IntPtrOutput `pulumi:"minNumClusters"` // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - Name pulumi.StringOutput `pulumi:"name"` - NumClusters pulumi.IntPtrOutput `pulumi:"numClusters"` + Name pulumi.StringOutput `pulumi:"name"` + // The current number of clusters used by the endpoint. + NumActiveSessions pulumi.IntOutput `pulumi:"numActiveSessions"` + // The current number of clusters used by the endpoint. + NumClusters pulumi.IntOutput `pulumi:"numClusters"` // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. OdbcParams SqlEndpointOdbcParamsOutput `pulumi:"odbcParams"` // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. SpotInstancePolicy pulumi.StringPtrOutput `pulumi:"spotInstancePolicy"` - State pulumi.StringOutput `pulumi:"state"` + // The current state of the endpoint. + State pulumi.StringOutput `pulumi:"state"` // Databricks tags all endpoint resources with these tags. Tags SqlEndpointTagsPtrOutput `pulumi:"tags"` - // SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. WarehouseType pulumi.StringPtrOutput `pulumi:"warehouseType"` } @@ -155,6 +163,8 @@ type sqlEndpointState struct { Channel *SqlEndpointChannel `pulumi:"channel"` // The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". ClusterSize *string `pulumi:"clusterSize"` + // The username of the user who created the endpoint. + CreatorName *string `pulumi:"creatorName"` // ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. DataSourceId *string `pulumi:"dataSourceId"` // Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. 
@@ -164,8 +174,10 @@ type sqlEndpointState struct { // - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). // // - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). - EnableServerlessCompute *bool `pulumi:"enableServerlessCompute"` - InstanceProfileArn *string `pulumi:"instanceProfileArn"` + EnableServerlessCompute *bool `pulumi:"enableServerlessCompute"` + // Health status of the endpoint. + Healths []SqlEndpointHealth `pulumi:"healths"` + InstanceProfileArn *string `pulumi:"instanceProfileArn"` // JDBC connection string. JdbcUrl *string `pulumi:"jdbcUrl"` // Maximum number of clusters available when a SQL warehouse is running. This field is required. 
If multi-cluster load balancing is not enabled, this is default to `1`. @@ -173,16 +185,20 @@ type sqlEndpointState struct { // Minimum number of clusters available when a SQL warehouse is running. The default is `1`. MinNumClusters *int `pulumi:"minNumClusters"` // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - Name *string `pulumi:"name"` - NumClusters *int `pulumi:"numClusters"` + Name *string `pulumi:"name"` + // The current number of clusters used by the endpoint. + NumActiveSessions *int `pulumi:"numActiveSessions"` + // The current number of clusters used by the endpoint. + NumClusters *int `pulumi:"numClusters"` // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. OdbcParams *SqlEndpointOdbcParams `pulumi:"odbcParams"` // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. SpotInstancePolicy *string `pulumi:"spotInstancePolicy"` - State *string `pulumi:"state"` + // The current state of the endpoint. + State *string `pulumi:"state"` // Databricks tags all endpoint resources with these tags. Tags *SqlEndpointTags `pulumi:"tags"` - // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + // SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. WarehouseType *string `pulumi:"warehouseType"` } @@ -193,6 +209,8 @@ type SqlEndpointState struct { Channel SqlEndpointChannelPtrInput // The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". ClusterSize pulumi.StringPtrInput + // The username of the user who created the endpoint. + CreatorName pulumi.StringPtrInput // ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. DataSourceId pulumi.StringPtrInput // Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. @@ -203,7 +221,9 @@ type SqlEndpointState struct { // // - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). 
EnableServerlessCompute pulumi.BoolPtrInput - InstanceProfileArn pulumi.StringPtrInput + // Health status of the endpoint. + Healths SqlEndpointHealthArrayInput + InstanceProfileArn pulumi.StringPtrInput // JDBC connection string. JdbcUrl pulumi.StringPtrInput // Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. @@ -211,16 +231,20 @@ type SqlEndpointState struct { // Minimum number of clusters available when a SQL warehouse is running. The default is `1`. MinNumClusters pulumi.IntPtrInput // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - Name pulumi.StringPtrInput + Name pulumi.StringPtrInput + // The current number of clusters used by the endpoint. + NumActiveSessions pulumi.IntPtrInput + // The current number of clusters used by the endpoint. NumClusters pulumi.IntPtrInput // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. OdbcParams SqlEndpointOdbcParamsPtrInput // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. SpotInstancePolicy pulumi.StringPtrInput - State pulumi.StringPtrInput + // The current state of the endpoint. + State pulumi.StringPtrInput // Databricks tags all endpoint resources with these tags. Tags SqlEndpointTagsPtrInput - // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. WarehouseType pulumi.StringPtrInput } @@ -246,23 +270,17 @@ type sqlEndpointArgs struct { // - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). EnableServerlessCompute *bool `pulumi:"enableServerlessCompute"` InstanceProfileArn *string `pulumi:"instanceProfileArn"` - // JDBC connection string. - JdbcUrl *string `pulumi:"jdbcUrl"` // Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. 
MaxNumClusters *int `pulumi:"maxNumClusters"` // Minimum number of clusters available when a SQL warehouse is running. The default is `1`. MinNumClusters *int `pulumi:"minNumClusters"` // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - Name *string `pulumi:"name"` - NumClusters *int `pulumi:"numClusters"` - // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. - OdbcParams *SqlEndpointOdbcParams `pulumi:"odbcParams"` + Name *string `pulumi:"name"` // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. SpotInstancePolicy *string `pulumi:"spotInstancePolicy"` - State *string `pulumi:"state"` // Databricks tags all endpoint resources with these tags. Tags *SqlEndpointTags `pulumi:"tags"` - // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. WarehouseType *string `pulumi:"warehouseType"` } @@ -285,23 +303,17 @@ type SqlEndpointArgs struct { // - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). EnableServerlessCompute pulumi.BoolPtrInput InstanceProfileArn pulumi.StringPtrInput - // JDBC connection string. - JdbcUrl pulumi.StringPtrInput // Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. MaxNumClusters pulumi.IntPtrInput // Minimum number of clusters available when a SQL warehouse is running. The default is `1`. MinNumClusters pulumi.IntPtrInput // Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - Name pulumi.StringPtrInput - NumClusters pulumi.IntPtrInput - // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. - OdbcParams SqlEndpointOdbcParamsPtrInput + Name pulumi.StringPtrInput // The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. 
Default is `COST_OPTIMIZED`. SpotInstancePolicy pulumi.StringPtrInput - State pulumi.StringPtrInput // Databricks tags all endpoint resources with these tags. Tags SqlEndpointTagsPtrInput - // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + // SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. WarehouseType pulumi.StringPtrInput } @@ -407,6 +419,11 @@ func (o SqlEndpointOutput) ClusterSize() pulumi.StringOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringOutput { return v.ClusterSize }).(pulumi.StringOutput) } +// The username of the user who created the endpoint. +func (o SqlEndpointOutput) CreatorName() pulumi.StringOutput { + return o.ApplyT(func(v *SqlEndpoint) pulumi.StringOutput { return v.CreatorName }).(pulumi.StringOutput) +} + // ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. 
func (o SqlEndpointOutput) DataSourceId() pulumi.StringOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringOutput { return v.DataSourceId }).(pulumi.StringOutput) @@ -426,6 +443,11 @@ func (o SqlEndpointOutput) EnableServerlessCompute() pulumi.BoolPtrOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.BoolPtrOutput { return v.EnableServerlessCompute }).(pulumi.BoolPtrOutput) } +// Health status of the endpoint. +func (o SqlEndpointOutput) Healths() SqlEndpointHealthArrayOutput { + return o.ApplyT(func(v *SqlEndpoint) SqlEndpointHealthArrayOutput { return v.Healths }).(SqlEndpointHealthArrayOutput) +} + func (o SqlEndpointOutput) InstanceProfileArn() pulumi.StringPtrOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringPtrOutput { return v.InstanceProfileArn }).(pulumi.StringPtrOutput) } @@ -450,8 +472,14 @@ func (o SqlEndpointOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) } -func (o SqlEndpointOutput) NumClusters() pulumi.IntPtrOutput { - return o.ApplyT(func(v *SqlEndpoint) pulumi.IntPtrOutput { return v.NumClusters }).(pulumi.IntPtrOutput) +// The current number of clusters used by the endpoint. +func (o SqlEndpointOutput) NumActiveSessions() pulumi.IntOutput { + return o.ApplyT(func(v *SqlEndpoint) pulumi.IntOutput { return v.NumActiveSessions }).(pulumi.IntOutput) +} + +// The current number of clusters used by the endpoint. +func (o SqlEndpointOutput) NumClusters() pulumi.IntOutput { + return o.ApplyT(func(v *SqlEndpoint) pulumi.IntOutput { return v.NumClusters }).(pulumi.IntOutput) } // ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -464,6 +492,7 @@ func (o SqlEndpointOutput) SpotInstancePolicy() pulumi.StringPtrOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringPtrOutput { return v.SpotInstancePolicy }).(pulumi.StringPtrOutput) } +// The current state of the endpoint. 
func (o SqlEndpointOutput) State() pulumi.StringOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringOutput { return v.State }).(pulumi.StringOutput) } @@ -473,7 +502,7 @@ func (o SqlEndpointOutput) Tags() SqlEndpointTagsPtrOutput { return o.ApplyT(func(v *SqlEndpoint) SqlEndpointTagsPtrOutput { return v.Tags }).(SqlEndpointTagsPtrOutput) } -// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. +// SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
func (o SqlEndpointOutput) WarehouseType() pulumi.StringPtrOutput { return o.ApplyT(func(v *SqlEndpoint) pulumi.StringPtrOutput { return v.WarehouseType }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/databricks/storageCredential.go b/sdk/go/databricks/storageCredential.go index 4041b34a..7ecaa7f6 100644 --- a/sdk/go/databricks/storageCredential.go +++ b/sdk/go/databricks/storageCredential.go @@ -11,6 +11,8 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// > **Note** This resource could be used with account or workspace-level provider. +// // To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: // // - `StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. @@ -175,6 +177,8 @@ type StorageCredential struct { Owner pulumi.StringOutput `pulumi:"owner"` // Indicates whether the storage credential is only usable for read operations. ReadOnly pulumi.BoolPtrOutput `pulumi:"readOnly"` + // Suppress validation errors if any & force save the storage credential. + SkipValidation pulumi.BoolPtrOutput `pulumi:"skipValidation"` } // NewStorageCredential registers a new resource with the given unique name, arguments, and options. @@ -226,6 +230,8 @@ type storageCredentialState struct { Owner *string `pulumi:"owner"` // Indicates whether the storage credential is only usable for read operations. ReadOnly *bool `pulumi:"readOnly"` + // Suppress validation errors if any & force save the storage credential. + SkipValidation *bool `pulumi:"skipValidation"` } type StorageCredentialState struct { @@ -248,6 +254,8 @@ type StorageCredentialState struct { Owner pulumi.StringPtrInput // Indicates whether the storage credential is only usable for read operations. 
ReadOnly pulumi.BoolPtrInput + // Suppress validation errors if any & force save the storage credential. + SkipValidation pulumi.BoolPtrInput } func (StorageCredentialState) ElementType() reflect.Type { @@ -274,6 +282,8 @@ type storageCredentialArgs struct { Owner *string `pulumi:"owner"` // Indicates whether the storage credential is only usable for read operations. ReadOnly *bool `pulumi:"readOnly"` + // Suppress validation errors if any & force save the storage credential. + SkipValidation *bool `pulumi:"skipValidation"` } // The set of arguments for constructing a StorageCredential resource. @@ -297,6 +307,8 @@ type StorageCredentialArgs struct { Owner pulumi.StringPtrInput // Indicates whether the storage credential is only usable for read operations. ReadOnly pulumi.BoolPtrInput + // Suppress validation errors if any & force save the storage credential. + SkipValidation pulumi.BoolPtrInput } func (StorageCredentialArgs) ElementType() reflect.Type { @@ -449,6 +461,11 @@ func (o StorageCredentialOutput) ReadOnly() pulumi.BoolPtrOutput { return o.ApplyT(func(v *StorageCredential) pulumi.BoolPtrOutput { return v.ReadOnly }).(pulumi.BoolPtrOutput) } +// Suppress validation errors if any & force save the storage credential. +func (o StorageCredentialOutput) SkipValidation() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *StorageCredential) pulumi.BoolPtrOutput { return v.SkipValidation }).(pulumi.BoolPtrOutput) +} + type StorageCredentialArrayOutput struct{ *pulumi.OutputState } func (StorageCredentialArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/systemSchema.go b/sdk/go/databricks/systemSchema.go index 1f69dbdc..7f2c01e8 100644 --- a/sdk/go/databricks/systemSchema.go +++ b/sdk/go/databricks/systemSchema.go @@ -13,9 +13,7 @@ import ( // > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). 
// -// > **Notes** -// -// Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. +// > **Note** This resource could be only used with workspace-level provider! // // Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. // diff --git a/sdk/go/databricks/volume.go b/sdk/go/databricks/volume.go index 81ea9abd..6e13c3e2 100644 --- a/sdk/go/databricks/volume.go +++ b/sdk/go/databricks/volume.go @@ -14,6 +14,8 @@ import ( // > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). // +// > **Note** This resource could be only used with workspace-level provider! +// // Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. // // A volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog. diff --git a/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java b/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java index a3e0a203..5da47ddf 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java @@ -17,6 +17,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be used with account or workspace-level provider. 
+ * * This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. * * > **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`. diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Connection.java b/sdk/java/src/main/java/com/pulumi/databricks/Connection.java index 91cf37ca..ef034761 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Connection.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Connection.java @@ -19,6 +19,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: * * - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. 
diff --git a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java index fafdd803..f7ae9778 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java @@ -22,6 +22,8 @@ import com.pulumi.databricks.inputs.GetClustersPlainArgs; import com.pulumi.databricks.inputs.GetCurrentConfigArgs; import com.pulumi.databricks.inputs.GetCurrentConfigPlainArgs; +import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; +import com.pulumi.databricks.inputs.GetCurrentMetastorePlainArgs; import com.pulumi.databricks.inputs.GetDbfsFileArgs; import com.pulumi.databricks.inputs.GetDbfsFilePathsArgs; import com.pulumi.databricks.inputs.GetDbfsFilePathsPlainArgs; @@ -86,6 +88,7 @@ import com.pulumi.databricks.outputs.GetClusterResult; import com.pulumi.databricks.outputs.GetClustersResult; import com.pulumi.databricks.outputs.GetCurrentConfigResult; +import com.pulumi.databricks.outputs.GetCurrentMetastoreResult; import com.pulumi.databricks.outputs.GetCurrentUserResult; import com.pulumi.databricks.outputs.GetDbfsFilePathsResult; import com.pulumi.databricks.outputs.GetDbfsFileResult; @@ -2302,6 +2305,306 @@ public static Output getCurrentConfig(GetCurrentConfigAr public static CompletableFuture getCurrentConfigPlain(GetCurrentConfigPlainArgs args, InvokeOptions options) { return Deployment.getInstance().invokeAsync("databricks:index/getCurrentConfig:getCurrentConfig", TypeShape.of(GetCurrentConfigResult.class), args, Utilities.withVersion(options)); } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. 
+ * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getCurrentMetastore(); + * + * ctx.export("someMetastore", data.databricks_metastore().this().metastore_info()[0]); + * } + * } + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getCurrentMetastore() { + return getCurrentMetastore(GetCurrentMetastoreArgs.Empty, InvokeOptions.Empty); + } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. 
+ * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getCurrentMetastore(); + * + * ctx.export("someMetastore", data.databricks_metastore().this().metastore_info()[0]); + * } + * } + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static CompletableFuture getCurrentMetastorePlain() { + return getCurrentMetastorePlain(GetCurrentMetastorePlainArgs.Empty, InvokeOptions.Empty); + } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. 
+ * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getCurrentMetastore(); + * + * ctx.export("someMetastore", data.databricks_metastore().this().metastore_info()[0]); + * } + * } + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getCurrentMetastore(GetCurrentMetastoreArgs args) { + return getCurrentMetastore(args, InvokeOptions.Empty); + } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. 
+ * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getCurrentMetastore(); + * + * ctx.export("someMetastore", data.databricks_metastore().this().metastore_info()[0]); + * } + * } + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static CompletableFuture getCurrentMetastorePlain(GetCurrentMetastorePlainArgs args) { + return getCurrentMetastorePlain(args, InvokeOptions.Empty); + } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. 
+ * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getCurrentMetastore(); + * + * ctx.export("someMetastore", data.databricks_metastore().this().metastore_info()[0]); + * } + * } + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static Output getCurrentMetastore(GetCurrentMetastoreArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", TypeShape.of(GetCurrentMetastoreResult.class), args, Utilities.withVersion(options)); + } + /** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. 
+ * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetCurrentMetastoreArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var this = DatabricksFunctions.getCurrentMetastore(); + * + * ctx.export("someMetastore", data.databricks_metastore().this().metastore_info()[0]); + * } + * } + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + * + */ + public static CompletableFuture getCurrentMetastorePlain(GetCurrentMetastorePlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("databricks:index/getCurrentMetastore:getCurrentMetastore", TypeShape.of(GetCurrentMetastoreResult.class), args, Utilities.withVersion(options)); + } /** * ## Exported attributes * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/DefaultNamespaceSetting.java b/sdk/java/src/main/java/com/pulumi/databricks/DefaultNamespaceSetting.java index e5d6c8af..823029b7 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/DefaultNamespaceSetting.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/DefaultNamespaceSetting.java @@ -15,6 +15,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! 
+ * * The `databricks.DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. * Setting the default catalog for the workspace determines the catalog that is used when queries do not reference * a fully qualified 3 level name. For example, if the default catalog is set to 'retail_prod' then a query diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Directory.java b/sdk/java/src/main/java/com/pulumi/databricks/Directory.java index 570a79c5..2eb42969 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Directory.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Directory.java @@ -62,6 +62,20 @@ public Output objectId() { public Output path() { return this.path; } + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + @Export(name="workspacePath", refs={String.class}, tree="[0]") + private Output workspacePath; + + /** + * @return path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + public Output workspacePath() { + return this.workspacePath; + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java b/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java index fdb4b04e..634f9133 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java @@ -17,6 +17,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! + * * To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: * * - databricks.StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. 
diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Grant.java b/sdk/java/src/main/java/com/pulumi/databricks/Grant.java new file mode 100644 index 00000000..75a8b9ab --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/Grant.java @@ -0,0 +1,159 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.databricks.GrantArgs; +import com.pulumi.databricks.Utilities; +import com.pulumi.databricks.inputs.GrantState; +import java.lang.String; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nullable; + +@ResourceType(type="databricks:index/grant:Grant") +public class Grant extends com.pulumi.resources.CustomResource { + @Export(name="catalog", refs={String.class}, tree="[0]") + private Output catalog; + + public Output> catalog() { + return Codegen.optional(this.catalog); + } + @Export(name="externalLocation", refs={String.class}, tree="[0]") + private Output externalLocation; + + public Output> externalLocation() { + return Codegen.optional(this.externalLocation); + } + @Export(name="foreignConnection", refs={String.class}, tree="[0]") + private Output foreignConnection; + + public Output> foreignConnection() { + return Codegen.optional(this.foreignConnection); + } + @Export(name="function", refs={String.class}, tree="[0]") + private Output function; + + public Output> function() { + return Codegen.optional(this.function); + } + @Export(name="metastore", refs={String.class}, tree="[0]") + private Output metastore; + + public Output> metastore() { + return Codegen.optional(this.metastore); + } + @Export(name="model", refs={String.class}, tree="[0]") + private Output model; + + public Output> model() { + return 
Codegen.optional(this.model); + } + @Export(name="pipeline", refs={String.class}, tree="[0]") + private Output pipeline; + + public Output> pipeline() { + return Codegen.optional(this.pipeline); + } + @Export(name="principal", refs={String.class}, tree="[0]") + private Output principal; + + public Output principal() { + return this.principal; + } + @Export(name="privileges", refs={List.class,String.class}, tree="[0,1]") + private Output> privileges; + + public Output> privileges() { + return this.privileges; + } + @Export(name="recipient", refs={String.class}, tree="[0]") + private Output recipient; + + public Output> recipient() { + return Codegen.optional(this.recipient); + } + @Export(name="schema", refs={String.class}, tree="[0]") + private Output schema; + + public Output> schema() { + return Codegen.optional(this.schema); + } + @Export(name="share", refs={String.class}, tree="[0]") + private Output share; + + public Output> share() { + return Codegen.optional(this.share); + } + @Export(name="storageCredential", refs={String.class}, tree="[0]") + private Output storageCredential; + + public Output> storageCredential() { + return Codegen.optional(this.storageCredential); + } + @Export(name="table", refs={String.class}, tree="[0]") + private Output table; + + public Output> table() { + return Codegen.optional(this.table); + } + @Export(name="volume", refs={String.class}, tree="[0]") + private Output volume; + + public Output> volume() { + return Codegen.optional(this.volume); + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public Grant(String name) { + this(name, GrantArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public Grant(String name, GrantArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. 
+ * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. + */ + public Grant(String name, GrantArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("databricks:index/grant:Grant", name, args == null ? GrantArgs.Empty : args, makeResourceOptions(options, Codegen.empty())); + } + + private Grant(String name, Output id, @Nullable GrantState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("databricks:index/grant:Grant", name, state, makeResourceOptions(options, id)); + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static Grant get(String name, Output id, @Nullable GrantState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new Grant(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/GrantArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/GrantArgs.java new file mode 100644 index 00000000..1998c232 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/GrantArgs.java @@ -0,0 +1,313 @@ +// *** WARNING: this file was generated by pulumi-java-gen. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GrantArgs extends com.pulumi.resources.ResourceArgs { + + public static final GrantArgs Empty = new GrantArgs(); + + @Import(name="catalog") + private @Nullable Output catalog; + + public Optional> catalog() { + return Optional.ofNullable(this.catalog); + } + + @Import(name="externalLocation") + private @Nullable Output externalLocation; + + public Optional> externalLocation() { + return Optional.ofNullable(this.externalLocation); + } + + @Import(name="foreignConnection") + private @Nullable Output foreignConnection; + + public Optional> foreignConnection() { + return Optional.ofNullable(this.foreignConnection); + } + + @Import(name="function") + private @Nullable Output function; + + public Optional> function() { + return Optional.ofNullable(this.function); + } + + @Import(name="metastore") + private @Nullable Output metastore; + + public Optional> metastore() { + return Optional.ofNullable(this.metastore); + } + + @Import(name="model") + private @Nullable Output model; + + public Optional> model() { + return Optional.ofNullable(this.model); + } + + @Import(name="pipeline") + private @Nullable Output pipeline; + + public Optional> pipeline() { + return Optional.ofNullable(this.pipeline); + } + + @Import(name="principal", required=true) + private Output principal; + + public Output principal() { + return this.principal; + } + + @Import(name="privileges", required=true) + private Output> privileges; + + public Output> privileges() { + return this.privileges; + } + + @Import(name="recipient") + private @Nullable Output recipient; + + public 
Optional> recipient() { + return Optional.ofNullable(this.recipient); + } + + @Import(name="schema") + private @Nullable Output schema; + + public Optional> schema() { + return Optional.ofNullable(this.schema); + } + + @Import(name="share") + private @Nullable Output share; + + public Optional> share() { + return Optional.ofNullable(this.share); + } + + @Import(name="storageCredential") + private @Nullable Output storageCredential; + + public Optional> storageCredential() { + return Optional.ofNullable(this.storageCredential); + } + + @Import(name="table") + private @Nullable Output table; + + public Optional> table() { + return Optional.ofNullable(this.table); + } + + @Import(name="volume") + private @Nullable Output volume; + + public Optional> volume() { + return Optional.ofNullable(this.volume); + } + + private GrantArgs() {} + + private GrantArgs(GrantArgs $) { + this.catalog = $.catalog; + this.externalLocation = $.externalLocation; + this.foreignConnection = $.foreignConnection; + this.function = $.function; + this.metastore = $.metastore; + this.model = $.model; + this.pipeline = $.pipeline; + this.principal = $.principal; + this.privileges = $.privileges; + this.recipient = $.recipient; + this.schema = $.schema; + this.share = $.share; + this.storageCredential = $.storageCredential; + this.table = $.table; + this.volume = $.volume; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GrantArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GrantArgs $; + + public Builder() { + $ = new GrantArgs(); + } + + public Builder(GrantArgs defaults) { + $ = new GrantArgs(Objects.requireNonNull(defaults)); + } + + public Builder catalog(@Nullable Output catalog) { + $.catalog = catalog; + return this; + } + + public Builder catalog(String catalog) { + return catalog(Output.of(catalog)); + } + + public Builder externalLocation(@Nullable Output externalLocation) { + 
$.externalLocation = externalLocation; + return this; + } + + public Builder externalLocation(String externalLocation) { + return externalLocation(Output.of(externalLocation)); + } + + public Builder foreignConnection(@Nullable Output foreignConnection) { + $.foreignConnection = foreignConnection; + return this; + } + + public Builder foreignConnection(String foreignConnection) { + return foreignConnection(Output.of(foreignConnection)); + } + + public Builder function(@Nullable Output function) { + $.function = function; + return this; + } + + public Builder function(String function) { + return function(Output.of(function)); + } + + public Builder metastore(@Nullable Output metastore) { + $.metastore = metastore; + return this; + } + + public Builder metastore(String metastore) { + return metastore(Output.of(metastore)); + } + + public Builder model(@Nullable Output model) { + $.model = model; + return this; + } + + public Builder model(String model) { + return model(Output.of(model)); + } + + public Builder pipeline(@Nullable Output pipeline) { + $.pipeline = pipeline; + return this; + } + + public Builder pipeline(String pipeline) { + return pipeline(Output.of(pipeline)); + } + + public Builder principal(Output principal) { + $.principal = principal; + return this; + } + + public Builder principal(String principal) { + return principal(Output.of(principal)); + } + + public Builder privileges(Output> privileges) { + $.privileges = privileges; + return this; + } + + public Builder privileges(List privileges) { + return privileges(Output.of(privileges)); + } + + public Builder privileges(String... 
privileges) { + return privileges(List.of(privileges)); + } + + public Builder recipient(@Nullable Output recipient) { + $.recipient = recipient; + return this; + } + + public Builder recipient(String recipient) { + return recipient(Output.of(recipient)); + } + + public Builder schema(@Nullable Output schema) { + $.schema = schema; + return this; + } + + public Builder schema(String schema) { + return schema(Output.of(schema)); + } + + public Builder share(@Nullable Output share) { + $.share = share; + return this; + } + + public Builder share(String share) { + return share(Output.of(share)); + } + + public Builder storageCredential(@Nullable Output storageCredential) { + $.storageCredential = storageCredential; + return this; + } + + public Builder storageCredential(String storageCredential) { + return storageCredential(Output.of(storageCredential)); + } + + public Builder table(@Nullable Output table) { + $.table = table; + return this; + } + + public Builder table(String table) { + return table(Output.of(table)); + } + + public Builder volume(@Nullable Output volume) { + $.volume = volume; + return this; + } + + public Builder volume(String volume) { + return volume(Output.of(volume)); + } + + public GrantArgs build() { + if ($.principal == null) { + throw new MissingRequiredPropertyException("GrantArgs", "principal"); + } + if ($.privileges == null) { + throw new MissingRequiredPropertyException("GrantArgs", "privileges"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java b/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java index 549501c8..ecfeedf6 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java @@ -17,6 +17,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * A metastore is the top-level container of objects in Unity Catalog. 
It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. * * Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java index 6255ca20..85467a8f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java @@ -16,6 +16,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with account-level provider! + * * A single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. * * ## Example Usage diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccess.java b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccess.java index c07ad8b2..d1ab6ea4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccess.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccess.java @@ -21,6 +21,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * Optionally, each databricks.Metastore can have a default databricks.StorageCredential defined as `databricks.MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined. 
* * ## Import @@ -120,6 +122,12 @@ public Output owner() { public Output> readOnly() { return Codegen.optional(this.readOnly); } + @Export(name="skipValidation", refs={Boolean.class}, tree="[0]") + private Output skipValidation; + + public Output> skipValidation() { + return Codegen.optional(this.skipValidation); + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccessArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccessArgs.java index be3459bd..ed41d9b3 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccessArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreDataAccessArgs.java @@ -120,6 +120,13 @@ public Optional> readOnly() { return Optional.ofNullable(this.readOnly); } + @Import(name="skipValidation") + private @Nullable Output skipValidation; + + public Optional> skipValidation() { + return Optional.ofNullable(this.skipValidation); + } + private MetastoreDataAccessArgs() {} private MetastoreDataAccessArgs(MetastoreDataAccessArgs $) { @@ -136,6 +143,7 @@ private MetastoreDataAccessArgs(MetastoreDataAccessArgs $) { this.name = $.name; this.owner = $.owner; this.readOnly = $.readOnly; + this.skipValidation = $.skipValidation; } public static Builder builder() { @@ -285,6 +293,15 @@ public Builder readOnly(Boolean readOnly) { return readOnly(Output.of(readOnly)); } + public Builder skipValidation(@Nullable Output skipValidation) { + $.skipValidation = skipValidation; + return this; + } + + public Builder skipValidation(Boolean skipValidation) { + return skipValidation(Output.of(skipValidation)); + } + public MetastoreDataAccessArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreProvider.java b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreProvider.java index f06b92a1..c34d690c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreProvider.java +++ 
b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreProvider.java @@ -16,6 +16,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. * * A `databricks.MetastoreProvider` is contained within databricks.Metastore and can contain a list of shares that have been shared with you. diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Recipient.java b/sdk/java/src/main/java/com/pulumi/databricks/Recipient.java index cc503ec4..fce295d3 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Recipient.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Recipient.java @@ -18,6 +18,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Within a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to. * * A `databricks.Recipient` is contained within databricks.Metastore and can have permissions to `SELECT` from a list of shares. diff --git a/sdk/java/src/main/java/com/pulumi/databricks/RegisteredModel.java b/sdk/java/src/main/java/com/pulumi/databricks/RegisteredModel.java index 1844a2a9..d070639f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/RegisteredModel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/RegisteredModel.java @@ -15,6 +15,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! + * * This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. 
* * ## Example Usage diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Repo.java b/sdk/java/src/main/java/com/pulumi/databricks/Repo.java index 4cca3a47..27985208 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Repo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Repo.java @@ -117,6 +117,20 @@ public Output> tag() { public Output url() { return this.url; } + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + @Export(name="workspacePath", refs={String.class}, tree="[0]") + private Output workspacePath; + + /** + * @return path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + public Output workspacePath() { + return this.workspacePath; + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Schema.java b/sdk/java/src/main/java/com/pulumi/databricks/Schema.java index 2d340672..0a1a2701 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Schema.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Schema.java @@ -18,6 +18,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. * * A `databricks.Schema` is contained within databricks.Catalog and can contain tables & views. 
diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpoint.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpoint.java index 0d6bfb6c..ff7038dd 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpoint.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpoint.java @@ -11,11 +11,13 @@ import com.pulumi.databricks.Utilities; import com.pulumi.databricks.inputs.SqlEndpointState; import com.pulumi.databricks.outputs.SqlEndpointChannel; +import com.pulumi.databricks.outputs.SqlEndpointHealth; import com.pulumi.databricks.outputs.SqlEndpointOdbcParams; import com.pulumi.databricks.outputs.SqlEndpointTags; import java.lang.Boolean; import java.lang.Integer; import java.lang.String; +import java.util.List; import java.util.Optional; import javax.annotation.Nullable; @@ -130,6 +132,20 @@ public Output> channel() { public Output clusterSize() { return this.clusterSize; } + /** + * The username of the user who created the endpoint. + * + */ + @Export(name="creatorName", refs={String.class}, tree="[0]") + private Output creatorName; + + /** + * @return The username of the user who created the endpoint. + * + */ + public Output creatorName() { + return this.creatorName; + } /** * ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. * @@ -180,6 +196,20 @@ public Output> enablePhoton() { public Output> enableServerlessCompute() { return Codegen.optional(this.enableServerlessCompute); } + /** + * Health status of the endpoint. + * + */ + @Export(name="healths", refs={List.class,SqlEndpointHealth.class}, tree="[0,1]") + private Output> healths; + + /** + * @return Health status of the endpoint. 
+ * + */ + public Output> healths() { + return this.healths; + } @Export(name="instanceProfileArn", refs={String.class}, tree="[0]") private Output instanceProfileArn; @@ -242,11 +272,33 @@ public Output> minNumClusters() { public Output name() { return this.name; } + /** + * The current number of clusters used by the endpoint. + * + */ + @Export(name="numActiveSessions", refs={Integer.class}, tree="[0]") + private Output numActiveSessions; + + /** + * @return The current number of clusters used by the endpoint. + * + */ + public Output numActiveSessions() { + return this.numActiveSessions; + } + /** + * The current number of clusters used by the endpoint. + * + */ @Export(name="numClusters", refs={Integer.class}, tree="[0]") - private Output numClusters; + private Output numClusters; - public Output> numClusters() { - return Codegen.optional(this.numClusters); + /** + * @return The current number of clusters used by the endpoint. + * + */ + public Output numClusters() { + return this.numClusters; } /** * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -276,9 +328,17 @@ public Output odbcParams() { public Output> spotInstancePolicy() { return Codegen.optional(this.spotInstancePolicy); } + /** + * The current state of the endpoint. + * + */ @Export(name="state", refs={String.class}, tree="[0]") private Output state; + /** + * @return The current state of the endpoint. + * + */ public Output state() { return this.state; } @@ -297,14 +357,14 @@ public Output> tags() { return Codegen.optional(this.tags); } /** - * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. * */ @Export(name="warehouseType", refs={String.class}, tree="[0]") private Output warehouseType; /** - * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. * */ public Output> warehouseType() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpointArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpointArgs.java index 52cd97f9..4dd4560f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpointArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlEndpointArgs.java @@ -6,7 +6,6 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.SqlEndpointChannelArgs; -import com.pulumi.databricks.inputs.SqlEndpointOdbcParamsArgs; import com.pulumi.databricks.inputs.SqlEndpointTagsArgs; import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.Boolean; @@ -126,21 +125,6 @@ public Optional> instanceProfileArn() { return Optional.ofNullable(this.instanceProfileArn); } - /** - * JDBC connection string. - * - */ - @Import(name="jdbcUrl") - private @Nullable Output jdbcUrl; - - /** - * @return JDBC connection string. - * - */ - public Optional> jdbcUrl() { - return Optional.ofNullable(this.jdbcUrl); - } - /** * Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. * @@ -186,28 +170,6 @@ public Optional> name() { return Optional.ofNullable(this.name); } - @Import(name="numClusters") - private @Nullable Output numClusters; - - public Optional> numClusters() { - return Optional.ofNullable(this.numClusters); - } - - /** - * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. 
- * - */ - @Import(name="odbcParams") - private @Nullable Output odbcParams; - - /** - * @return ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. - * - */ - public Optional> odbcParams() { - return Optional.ofNullable(this.odbcParams); - } - /** * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. * @@ -223,13 +185,6 @@ public Optional> spotInstancePolicy() { return Optional.ofNullable(this.spotInstancePolicy); } - @Import(name="state") - private @Nullable Output state; - - public Optional> state() { - return Optional.ofNullable(this.state); - } - /** * Databricks tags all endpoint resources with these tags. * @@ -246,14 +201,14 @@ public Optional> tags() { } /** - * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. * */ @Import(name="warehouseType") private @Nullable Output warehouseType; /** - * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
* */ public Optional> warehouseType() { @@ -270,14 +225,10 @@ private SqlEndpointArgs(SqlEndpointArgs $) { this.enablePhoton = $.enablePhoton; this.enableServerlessCompute = $.enableServerlessCompute; this.instanceProfileArn = $.instanceProfileArn; - this.jdbcUrl = $.jdbcUrl; this.maxNumClusters = $.maxNumClusters; this.minNumClusters = $.minNumClusters; this.name = $.name; - this.numClusters = $.numClusters; - this.odbcParams = $.odbcParams; this.spotInstancePolicy = $.spotInstancePolicy; - this.state = $.state; this.tags = $.tags; this.warehouseType = $.warehouseType; } @@ -443,27 +394,6 @@ public Builder instanceProfileArn(String instanceProfileArn) { return instanceProfileArn(Output.of(instanceProfileArn)); } - /** - * @param jdbcUrl JDBC connection string. - * - * @return builder - * - */ - public Builder jdbcUrl(@Nullable Output jdbcUrl) { - $.jdbcUrl = jdbcUrl; - return this; - } - - /** - * @param jdbcUrl JDBC connection string. - * - * @return builder - * - */ - public Builder jdbcUrl(String jdbcUrl) { - return jdbcUrl(Output.of(jdbcUrl)); - } - /** * @param maxNumClusters Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. * @@ -527,36 +457,6 @@ public Builder name(String name) { return name(Output.of(name)); } - public Builder numClusters(@Nullable Output numClusters) { - $.numClusters = numClusters; - return this; - } - - public Builder numClusters(Integer numClusters) { - return numClusters(Output.of(numClusters)); - } - - /** - * @param odbcParams ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. 
- * - * @return builder - * - */ - public Builder odbcParams(@Nullable Output odbcParams) { - $.odbcParams = odbcParams; - return this; - } - - /** - * @param odbcParams ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. - * - * @return builder - * - */ - public Builder odbcParams(SqlEndpointOdbcParamsArgs odbcParams) { - return odbcParams(Output.of(odbcParams)); - } - /** * @param spotInstancePolicy The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. * @@ -578,15 +478,6 @@ public Builder spotInstancePolicy(String spotInstancePolicy) { return spotInstancePolicy(Output.of(spotInstancePolicy)); } - public Builder state(@Nullable Output state) { - $.state = state; - return this; - } - - public Builder state(String state) { - return state(Output.of(state)); - } - /** * @param tags Databricks tags all endpoint resources with these tags. * @@ -609,7 +500,7 @@ public Builder tags(SqlEndpointTagsArgs tags) { } /** - * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @param warehouseType SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. * * @return builder * @@ -620,7 +511,7 @@ public Builder warehouseType(@Nullable Output warehouseType) { } /** - * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java index aa6c3b18..d17516b8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java @@ -21,6 +21,8 @@ import javax.annotation.Nullable; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: * * - `databricks.StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. @@ -287,6 +289,20 @@ public Output owner() { public Output> readOnly() { return Codegen.optional(this.readOnly); } + /** + * Suppress validation errors if any & force save the storage credential. + * + */ + @Export(name="skipValidation", refs={Boolean.class}, tree="[0]") + private Output skipValidation; + + /** + * @return Suppress validation errors if any & force save the storage credential. + * + */ + public Output> skipValidation() { + return Codegen.optional(this.skipValidation); + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java index 26c812af..81cf21bb 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java @@ -149,6 +149,21 @@ public Optional> readOnly() { return Optional.ofNullable(this.readOnly); } + /** + * Suppress validation errors if any & force save the storage credential. 
+ * + */ + @Import(name="skipValidation") + private @Nullable Output skipValidation; + + /** + * @return Suppress validation errors if any & force save the storage credential. + * + */ + public Optional> skipValidation() { + return Optional.ofNullable(this.skipValidation); + } + private StorageCredentialArgs() {} private StorageCredentialArgs(StorageCredentialArgs $) { @@ -164,6 +179,7 @@ private StorageCredentialArgs(StorageCredentialArgs $) { this.name = $.name; this.owner = $.owner; this.readOnly = $.readOnly; + this.skipValidation = $.skipValidation; } public static Builder builder() { @@ -356,6 +372,27 @@ public Builder readOnly(Boolean readOnly) { return readOnly(Output.of(readOnly)); } + /** + * @param skipValidation Suppress validation errors if any & force save the storage credential. + * + * @return builder + * + */ + public Builder skipValidation(@Nullable Output skipValidation) { + $.skipValidation = skipValidation; + return this; + } + + /** + * @param skipValidation Suppress validation errors if any & force save the storage credential. + * + * @return builder + * + */ + public Builder skipValidation(Boolean skipValidation) { + return skipValidation(Output.of(skipValidation)); + } + public StorageCredentialArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SystemSchema.java b/sdk/java/src/main/java/com/pulumi/databricks/SystemSchema.java index ee50e4d0..538bd6b4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SystemSchema.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SystemSchema.java @@ -17,8 +17,7 @@ /** * > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). * - * > **Notes** - * Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. + * > **Note** This resource could be only used with workspace-level provider! * * Manages system tables enablement. 
System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Volume.java b/sdk/java/src/main/java/com/pulumi/databricks/Volume.java index ffa67e89..b9932b30 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Volume.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Volume.java @@ -17,6 +17,8 @@ /** * > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). * + * > **Note** This resource could be only used with workspace-level provider! + * * Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. * * A volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog. 
diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/DirectoryState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/DirectoryState.java index 530c1412..a902dca4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/DirectoryState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/DirectoryState.java @@ -54,12 +54,28 @@ public Optional> path() { return Optional.ofNullable(this.path); } + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + @Import(name="workspacePath") + private @Nullable Output workspacePath; + + /** + * @return path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + public Optional> workspacePath() { + return Optional.ofNullable(this.workspacePath); + } + private DirectoryState() {} private DirectoryState(DirectoryState $) { this.deleteRecursive = $.deleteRecursive; this.objectId = $.objectId; this.path = $.path; + this.workspacePath = $.workspacePath; } public static Builder builder() { @@ -131,6 +147,27 @@ public Builder path(String path) { return path(Output.of(path)); } + /** + * @param workspacePath path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + * @return builder + * + */ + public Builder workspacePath(@Nullable Output workspacePath) { + $.workspacePath = workspacePath; + return this; + } + + /** + * @param workspacePath path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + * @return builder + * + */ + public Builder workspacePath(String workspacePath) { + return workspacePath(Output.of(workspacePath)); + } + public DirectoryState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreArgs.java new file mode 100644 index 00000000..53a37867 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreArgs.java @@ -0,0 +1,121 @@ 
+// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.GetCurrentMetastoreMetastoreInfoArgs; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCurrentMetastoreArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCurrentMetastoreArgs Empty = new GetCurrentMetastoreArgs(); + + /** + * metastore ID. + * + */ + @Import(name="id") + private @Nullable Output id; + + /** + * @return metastore ID. + * + */ + public Optional> id() { + return Optional.ofNullable(this.id); + } + + /** + * summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + * + */ + @Import(name="metastoreInfo") + private @Nullable Output metastoreInfo; + + /** + * @return summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + * + */ + public Optional> metastoreInfo() { + return Optional.ofNullable(this.metastoreInfo); + } + + private GetCurrentMetastoreArgs() {} + + private GetCurrentMetastoreArgs(GetCurrentMetastoreArgs $) { + this.id = $.id; + this.metastoreInfo = $.metastoreInfo; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCurrentMetastoreArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCurrentMetastoreArgs $; + + public Builder() { + $ = new GetCurrentMetastoreArgs(); + } + + public Builder(GetCurrentMetastoreArgs defaults) { + $ = new GetCurrentMetastoreArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id metastore ID. + * + * @return builder + * + */ + public Builder id(@Nullable Output id) { + $.id = id; + return this; + } + + /** + * @param id metastore ID. + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + /** + * @param metastoreInfo summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + * + * @return builder + * + */ + public Builder metastoreInfo(@Nullable Output metastoreInfo) { + $.metastoreInfo = metastoreInfo; + return this; + } + + /** + * @param metastoreInfo summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + * + * @return builder + * + */ + public Builder metastoreInfo(GetCurrentMetastoreMetastoreInfoArgs metastoreInfo) { + return metastoreInfo(Output.of(metastoreInfo)); + } + + public GetCurrentMetastoreArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfo.java new file mode 100644 index 00000000..50029ae3 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfo.java @@ -0,0 +1,518 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCurrentMetastoreMetastoreInfo extends com.pulumi.resources.InvokeArgs { + + public static final GetCurrentMetastoreMetastoreInfo Empty = new GetCurrentMetastoreMetastoreInfo(); + + @Import(name="cloud") + private @Nullable String cloud; + + public Optional cloud() { + return Optional.ofNullable(this.cloud); + } + + /** + * Timestamp (in milliseconds) when the current metastore was created. + * + */ + @Import(name="createdAt") + private @Nullable Integer createdAt; + + /** + * @return Timestamp (in milliseconds) when the current metastore was created. + * + */ + public Optional createdAt() { + return Optional.ofNullable(this.createdAt); + } + + /** + * the ID of the identity that created the current metastore. + * + */ + @Import(name="createdBy") + private @Nullable String createdBy; + + /** + * @return the ID of the identity that created the current metastore. 
+ * + */ + public Optional createdBy() { + return Optional.ofNullable(this.createdBy); + } + + /** + * the ID of the default data access configuration. + * + */ + @Import(name="defaultDataAccessConfigId") + private @Nullable String defaultDataAccessConfigId; + + /** + * @return the ID of the default data access configuration. + * + */ + public Optional defaultDataAccessConfigId() { + return Optional.ofNullable(this.defaultDataAccessConfigId); + } + + /** + * The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + */ + @Import(name="deltaSharingOrganizationName") + private @Nullable String deltaSharingOrganizationName; + + /** + * @return The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + */ + public Optional deltaSharingOrganizationName() { + return Optional.ofNullable(this.deltaSharingOrganizationName); + } + + /** + * the expiration duration in seconds on recipient data access tokens. + * + */ + @Import(name="deltaSharingRecipientTokenLifetimeInSeconds") + private @Nullable Integer deltaSharingRecipientTokenLifetimeInSeconds; + + /** + * @return the expiration duration in seconds on recipient data access tokens. + * + */ + public Optional deltaSharingRecipientTokenLifetimeInSeconds() { + return Optional.ofNullable(this.deltaSharingRecipientTokenLifetimeInSeconds); + } + + /** + * Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + */ + @Import(name="deltaSharingScope") + private @Nullable String deltaSharingScope; + + /** + * @return Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + */ + public Optional deltaSharingScope() { + return Optional.ofNullable(this.deltaSharingScope); + } + + /** + * Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. 
+ * + */ + @Import(name="globalMetastoreId") + private @Nullable String globalMetastoreId; + + /** + * @return Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + */ + public Optional globalMetastoreId() { + return Optional.ofNullable(this.globalMetastoreId); + } + + /** + * Metastore ID. + * + */ + @Import(name="metastoreId") + private @Nullable String metastoreId; + + /** + * @return Metastore ID. + * + */ + public Optional metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + + /** + * Name of metastore. + * + */ + @Import(name="name") + private @Nullable String name; + + /** + * @return Name of metastore. + * + */ + public Optional name() { + return Optional.ofNullable(this.name); + } + + /** + * Username/group name/sp application_id of the metastore owner. + * + */ + @Import(name="owner") + private @Nullable String owner; + + /** + * @return Username/group name/sp application_id of the metastore owner. + * + */ + public Optional owner() { + return Optional.ofNullable(this.owner); + } + + /** + * the version of the privilege model used by the metastore. + * + */ + @Import(name="privilegeModelVersion") + private @Nullable String privilegeModelVersion; + + /** + * @return the version of the privilege model used by the metastore. + * + */ + public Optional privilegeModelVersion() { + return Optional.ofNullable(this.privilegeModelVersion); + } + + /** + * (Mandatory for account-level) The region of the metastore. + * + */ + @Import(name="region") + private @Nullable String region; + + /** + * @return (Mandatory for account-level) The region of the metastore. + * + */ + public Optional region() { + return Optional.ofNullable(this.region); + } + + /** + * Path on cloud storage account, where managed `databricks.Table` are stored. 
+ * + */ + @Import(name="storageRoot") + private @Nullable String storageRoot; + + /** + * @return Path on cloud storage account, where managed `databricks.Table` are stored. + * + */ + public Optional storageRoot() { + return Optional.ofNullable(this.storageRoot); + } + + /** + * ID of a storage credential used for the `storage_root`. + * + */ + @Import(name="storageRootCredentialId") + private @Nullable String storageRootCredentialId; + + /** + * @return ID of a storage credential used for the `storage_root`. + * + */ + public Optional storageRootCredentialId() { + return Optional.ofNullable(this.storageRootCredentialId); + } + + /** + * Name of a storage credential used for the `storage_root`. + * + */ + @Import(name="storageRootCredentialName") + private @Nullable String storageRootCredentialName; + + /** + * @return Name of a storage credential used for the `storage_root`. + * + */ + public Optional storageRootCredentialName() { + return Optional.ofNullable(this.storageRootCredentialName); + } + + /** + * Timestamp (in milliseconds) when the current metastore was updated. + * + */ + @Import(name="updatedAt") + private @Nullable Integer updatedAt; + + /** + * @return Timestamp (in milliseconds) when the current metastore was updated. + * + */ + public Optional updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + + /** + * the ID of the identity that updated the current metastore. + * + */ + @Import(name="updatedBy") + private @Nullable String updatedBy; + + /** + * @return the ID of the identity that updated the current metastore. 
+ * + */ + public Optional updatedBy() { + return Optional.ofNullable(this.updatedBy); + } + + private GetCurrentMetastoreMetastoreInfo() {} + + private GetCurrentMetastoreMetastoreInfo(GetCurrentMetastoreMetastoreInfo $) { + this.cloud = $.cloud; + this.createdAt = $.createdAt; + this.createdBy = $.createdBy; + this.defaultDataAccessConfigId = $.defaultDataAccessConfigId; + this.deltaSharingOrganizationName = $.deltaSharingOrganizationName; + this.deltaSharingRecipientTokenLifetimeInSeconds = $.deltaSharingRecipientTokenLifetimeInSeconds; + this.deltaSharingScope = $.deltaSharingScope; + this.globalMetastoreId = $.globalMetastoreId; + this.metastoreId = $.metastoreId; + this.name = $.name; + this.owner = $.owner; + this.privilegeModelVersion = $.privilegeModelVersion; + this.region = $.region; + this.storageRoot = $.storageRoot; + this.storageRootCredentialId = $.storageRootCredentialId; + this.storageRootCredentialName = $.storageRootCredentialName; + this.updatedAt = $.updatedAt; + this.updatedBy = $.updatedBy; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCurrentMetastoreMetastoreInfo defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCurrentMetastoreMetastoreInfo $; + + public Builder() { + $ = new GetCurrentMetastoreMetastoreInfo(); + } + + public Builder(GetCurrentMetastoreMetastoreInfo defaults) { + $ = new GetCurrentMetastoreMetastoreInfo(Objects.requireNonNull(defaults)); + } + + public Builder cloud(@Nullable String cloud) { + $.cloud = cloud; + return this; + } + + /** + * @param createdAt Timestamp (in milliseconds) when the current metastore was created. + * + * @return builder + * + */ + public Builder createdAt(@Nullable Integer createdAt) { + $.createdAt = createdAt; + return this; + } + + /** + * @param createdBy the ID of the identity that created the current metastore. 
+ * + * @return builder + * + */ + public Builder createdBy(@Nullable String createdBy) { + $.createdBy = createdBy; + return this; + } + + /** + * @param defaultDataAccessConfigId the ID of the default data access configuration. + * + * @return builder + * + */ + public Builder defaultDataAccessConfigId(@Nullable String defaultDataAccessConfigId) { + $.defaultDataAccessConfigId = defaultDataAccessConfigId; + return this; + } + + /** + * @param deltaSharingOrganizationName The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + * @return builder + * + */ + public Builder deltaSharingOrganizationName(@Nullable String deltaSharingOrganizationName) { + $.deltaSharingOrganizationName = deltaSharingOrganizationName; + return this; + } + + /** + * @param deltaSharingRecipientTokenLifetimeInSeconds the expiration duration in seconds on recipient data access tokens. + * + * @return builder + * + */ + public Builder deltaSharingRecipientTokenLifetimeInSeconds(@Nullable Integer deltaSharingRecipientTokenLifetimeInSeconds) { + $.deltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds; + return this; + } + + /** + * @param deltaSharingScope Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + * @return builder + * + */ + public Builder deltaSharingScope(@Nullable String deltaSharingScope) { + $.deltaSharingScope = deltaSharingScope; + return this; + } + + /** + * @param globalMetastoreId Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + * @return builder + * + */ + public Builder globalMetastoreId(@Nullable String globalMetastoreId) { + $.globalMetastoreId = globalMetastoreId; + return this; + } + + /** + * @param metastoreId Metastore ID. 
+ * + * @return builder + * + */ + public Builder metastoreId(@Nullable String metastoreId) { + $.metastoreId = metastoreId; + return this; + } + + /** + * @param name Name of metastore. + * + * @return builder + * + */ + public Builder name(@Nullable String name) { + $.name = name; + return this; + } + + /** + * @param owner Username/group name/sp application_id of the metastore owner. + * + * @return builder + * + */ + public Builder owner(@Nullable String owner) { + $.owner = owner; + return this; + } + + /** + * @param privilegeModelVersion the version of the privilege model used by the metastore. + * + * @return builder + * + */ + public Builder privilegeModelVersion(@Nullable String privilegeModelVersion) { + $.privilegeModelVersion = privilegeModelVersion; + return this; + } + + /** + * @param region (Mandatory for account-level) The region of the metastore. + * + * @return builder + * + */ + public Builder region(@Nullable String region) { + $.region = region; + return this; + } + + /** + * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. + * + * @return builder + * + */ + public Builder storageRoot(@Nullable String storageRoot) { + $.storageRoot = storageRoot; + return this; + } + + /** + * @param storageRootCredentialId ID of a storage credential used for the `storage_root`. + * + * @return builder + * + */ + public Builder storageRootCredentialId(@Nullable String storageRootCredentialId) { + $.storageRootCredentialId = storageRootCredentialId; + return this; + } + + /** + * @param storageRootCredentialName Name of a storage credential used for the `storage_root`. + * + * @return builder + * + */ + public Builder storageRootCredentialName(@Nullable String storageRootCredentialName) { + $.storageRootCredentialName = storageRootCredentialName; + return this; + } + + /** + * @param updatedAt Timestamp (in milliseconds) when the current metastore was updated. 
+ * + * @return builder + * + */ + public Builder updatedAt(@Nullable Integer updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + /** + * @param updatedBy the ID of the identity that updated the current metastore. + * + * @return builder + * + */ + public Builder updatedBy(@Nullable String updatedBy) { + $.updatedBy = updatedBy; + return this; + } + + public GetCurrentMetastoreMetastoreInfo build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfoArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfoArgs.java new file mode 100644 index 00000000..1094dfd5 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastoreMetastoreInfoArgs.java @@ -0,0 +1,693 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCurrentMetastoreMetastoreInfoArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetCurrentMetastoreMetastoreInfoArgs Empty = new GetCurrentMetastoreMetastoreInfoArgs(); + + @Import(name="cloud") + private @Nullable Output cloud; + + public Optional> cloud() { + return Optional.ofNullable(this.cloud); + } + + /** + * Timestamp (in milliseconds) when the current metastore was created. + * + */ + @Import(name="createdAt") + private @Nullable Output createdAt; + + /** + * @return Timestamp (in milliseconds) when the current metastore was created. + * + */ + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + + /** + * the ID of the identity that created the current metastore. 
+ * + */ + @Import(name="createdBy") + private @Nullable Output createdBy; + + /** + * @return the ID of the identity that created the current metastore. + * + */ + public Optional> createdBy() { + return Optional.ofNullable(this.createdBy); + } + + /** + * the ID of the default data access configuration. + * + */ + @Import(name="defaultDataAccessConfigId") + private @Nullable Output defaultDataAccessConfigId; + + /** + * @return the ID of the default data access configuration. + * + */ + public Optional> defaultDataAccessConfigId() { + return Optional.ofNullable(this.defaultDataAccessConfigId); + } + + /** + * The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + */ + @Import(name="deltaSharingOrganizationName") + private @Nullable Output deltaSharingOrganizationName; + + /** + * @return The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + */ + public Optional> deltaSharingOrganizationName() { + return Optional.ofNullable(this.deltaSharingOrganizationName); + } + + /** + * the expiration duration in seconds on recipient data access tokens. + * + */ + @Import(name="deltaSharingRecipientTokenLifetimeInSeconds") + private @Nullable Output deltaSharingRecipientTokenLifetimeInSeconds; + + /** + * @return the expiration duration in seconds on recipient data access tokens. + * + */ + public Optional> deltaSharingRecipientTokenLifetimeInSeconds() { + return Optional.ofNullable(this.deltaSharingRecipientTokenLifetimeInSeconds); + } + + /** + * Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + */ + @Import(name="deltaSharingScope") + private @Nullable Output deltaSharingScope; + + /** + * @return Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. 
+ * + */ + public Optional> deltaSharingScope() { + return Optional.ofNullable(this.deltaSharingScope); + } + + /** + * Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + */ + @Import(name="globalMetastoreId") + private @Nullable Output globalMetastoreId; + + /** + * @return Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + */ + public Optional> globalMetastoreId() { + return Optional.ofNullable(this.globalMetastoreId); + } + + /** + * Metastore ID. + * + */ + @Import(name="metastoreId") + private @Nullable Output metastoreId; + + /** + * @return Metastore ID. + * + */ + public Optional> metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + + /** + * Name of metastore. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return Name of metastore. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * Username/group name/sp application_id of the metastore owner. + * + */ + @Import(name="owner") + private @Nullable Output owner; + + /** + * @return Username/group name/sp application_id of the metastore owner. + * + */ + public Optional> owner() { + return Optional.ofNullable(this.owner); + } + + /** + * the version of the privilege model used by the metastore. + * + */ + @Import(name="privilegeModelVersion") + private @Nullable Output privilegeModelVersion; + + /** + * @return the version of the privilege model used by the metastore. + * + */ + public Optional> privilegeModelVersion() { + return Optional.ofNullable(this.privilegeModelVersion); + } + + /** + * (Mandatory for account-level) The region of the metastore. + * + */ + @Import(name="region") + private @Nullable Output region; + + /** + * @return (Mandatory for account-level) The region of the metastore. 
+ * + */ + public Optional> region() { + return Optional.ofNullable(this.region); + } + + /** + * Path on cloud storage account, where managed `databricks.Table` are stored. + * + */ + @Import(name="storageRoot") + private @Nullable Output storageRoot; + + /** + * @return Path on cloud storage account, where managed `databricks.Table` are stored. + * + */ + public Optional> storageRoot() { + return Optional.ofNullable(this.storageRoot); + } + + /** + * ID of a storage credential used for the `storage_root`. + * + */ + @Import(name="storageRootCredentialId") + private @Nullable Output storageRootCredentialId; + + /** + * @return ID of a storage credential used for the `storage_root`. + * + */ + public Optional> storageRootCredentialId() { + return Optional.ofNullable(this.storageRootCredentialId); + } + + /** + * Name of a storage credential used for the `storage_root`. + * + */ + @Import(name="storageRootCredentialName") + private @Nullable Output storageRootCredentialName; + + /** + * @return Name of a storage credential used for the `storage_root`. + * + */ + public Optional> storageRootCredentialName() { + return Optional.ofNullable(this.storageRootCredentialName); + } + + /** + * Timestamp (in milliseconds) when the current metastore was updated. + * + */ + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + /** + * @return Timestamp (in milliseconds) when the current metastore was updated. + * + */ + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + + /** + * the ID of the identity that updated the current metastore. + * + */ + @Import(name="updatedBy") + private @Nullable Output updatedBy; + + /** + * @return the ID of the identity that updated the current metastore. 
+ * + */ + public Optional> updatedBy() { + return Optional.ofNullable(this.updatedBy); + } + + private GetCurrentMetastoreMetastoreInfoArgs() {} + + private GetCurrentMetastoreMetastoreInfoArgs(GetCurrentMetastoreMetastoreInfoArgs $) { + this.cloud = $.cloud; + this.createdAt = $.createdAt; + this.createdBy = $.createdBy; + this.defaultDataAccessConfigId = $.defaultDataAccessConfigId; + this.deltaSharingOrganizationName = $.deltaSharingOrganizationName; + this.deltaSharingRecipientTokenLifetimeInSeconds = $.deltaSharingRecipientTokenLifetimeInSeconds; + this.deltaSharingScope = $.deltaSharingScope; + this.globalMetastoreId = $.globalMetastoreId; + this.metastoreId = $.metastoreId; + this.name = $.name; + this.owner = $.owner; + this.privilegeModelVersion = $.privilegeModelVersion; + this.region = $.region; + this.storageRoot = $.storageRoot; + this.storageRootCredentialId = $.storageRootCredentialId; + this.storageRootCredentialName = $.storageRootCredentialName; + this.updatedAt = $.updatedAt; + this.updatedBy = $.updatedBy; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCurrentMetastoreMetastoreInfoArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCurrentMetastoreMetastoreInfoArgs $; + + public Builder() { + $ = new GetCurrentMetastoreMetastoreInfoArgs(); + } + + public Builder(GetCurrentMetastoreMetastoreInfoArgs defaults) { + $ = new GetCurrentMetastoreMetastoreInfoArgs(Objects.requireNonNull(defaults)); + } + + public Builder cloud(@Nullable Output cloud) { + $.cloud = cloud; + return this; + } + + public Builder cloud(String cloud) { + return cloud(Output.of(cloud)); + } + + /** + * @param createdAt Timestamp (in milliseconds) when the current metastore was created. 
+ * + * @return builder + * + */ + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + /** + * @param createdAt Timestamp (in milliseconds) when the current metastore was created. + * + * @return builder + * + */ + public Builder createdAt(Integer createdAt) { + return createdAt(Output.of(createdAt)); + } + + /** + * @param createdBy the ID of the identity that created the current metastore. + * + * @return builder + * + */ + public Builder createdBy(@Nullable Output createdBy) { + $.createdBy = createdBy; + return this; + } + + /** + * @param createdBy the ID of the identity that created the current metastore. + * + * @return builder + * + */ + public Builder createdBy(String createdBy) { + return createdBy(Output.of(createdBy)); + } + + /** + * @param defaultDataAccessConfigId the ID of the default data access configuration. + * + * @return builder + * + */ + public Builder defaultDataAccessConfigId(@Nullable Output defaultDataAccessConfigId) { + $.defaultDataAccessConfigId = defaultDataAccessConfigId; + return this; + } + + /** + * @param defaultDataAccessConfigId the ID of the default data access configuration. + * + * @return builder + * + */ + public Builder defaultDataAccessConfigId(String defaultDataAccessConfigId) { + return defaultDataAccessConfigId(Output.of(defaultDataAccessConfigId)); + } + + /** + * @param deltaSharingOrganizationName The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + * @return builder + * + */ + public Builder deltaSharingOrganizationName(@Nullable Output deltaSharingOrganizationName) { + $.deltaSharingOrganizationName = deltaSharingOrganizationName; + return this; + } + + /** + * @param deltaSharingOrganizationName The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. 
+ * + * @return builder + * + */ + public Builder deltaSharingOrganizationName(String deltaSharingOrganizationName) { + return deltaSharingOrganizationName(Output.of(deltaSharingOrganizationName)); + } + + /** + * @param deltaSharingRecipientTokenLifetimeInSeconds the expiration duration in seconds on recipient data access tokens. + * + * @return builder + * + */ + public Builder deltaSharingRecipientTokenLifetimeInSeconds(@Nullable Output deltaSharingRecipientTokenLifetimeInSeconds) { + $.deltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds; + return this; + } + + /** + * @param deltaSharingRecipientTokenLifetimeInSeconds the expiration duration in seconds on recipient data access tokens. + * + * @return builder + * + */ + public Builder deltaSharingRecipientTokenLifetimeInSeconds(Integer deltaSharingRecipientTokenLifetimeInSeconds) { + return deltaSharingRecipientTokenLifetimeInSeconds(Output.of(deltaSharingRecipientTokenLifetimeInSeconds)); + } + + /** + * @param deltaSharingScope Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + * @return builder + * + */ + public Builder deltaSharingScope(@Nullable Output deltaSharingScope) { + $.deltaSharingScope = deltaSharingScope; + return this; + } + + /** + * @param deltaSharingScope Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + * @return builder + * + */ + public Builder deltaSharingScope(String deltaSharingScope) { + return deltaSharingScope(Output.of(deltaSharingScope)); + } + + /** + * @param globalMetastoreId Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. 
+ * + * @return builder + * + */ + public Builder globalMetastoreId(@Nullable Output globalMetastoreId) { + $.globalMetastoreId = globalMetastoreId; + return this; + } + + /** + * @param globalMetastoreId Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + * @return builder + * + */ + public Builder globalMetastoreId(String globalMetastoreId) { + return globalMetastoreId(Output.of(globalMetastoreId)); + } + + /** + * @param metastoreId Metastore ID. + * + * @return builder + * + */ + public Builder metastoreId(@Nullable Output metastoreId) { + $.metastoreId = metastoreId; + return this; + } + + /** + * @param metastoreId Metastore ID. + * + * @return builder + * + */ + public Builder metastoreId(String metastoreId) { + return metastoreId(Output.of(metastoreId)); + } + + /** + * @param name Name of metastore. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name Name of metastore. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param owner Username/group name/sp application_id of the metastore owner. + * + * @return builder + * + */ + public Builder owner(@Nullable Output owner) { + $.owner = owner; + return this; + } + + /** + * @param owner Username/group name/sp application_id of the metastore owner. + * + * @return builder + * + */ + public Builder owner(String owner) { + return owner(Output.of(owner)); + } + + /** + * @param privilegeModelVersion the version of the privilege model used by the metastore. + * + * @return builder + * + */ + public Builder privilegeModelVersion(@Nullable Output privilegeModelVersion) { + $.privilegeModelVersion = privilegeModelVersion; + return this; + } + + /** + * @param privilegeModelVersion the version of the privilege model used by the metastore. 
+ * + * @return builder + * + */ + public Builder privilegeModelVersion(String privilegeModelVersion) { + return privilegeModelVersion(Output.of(privilegeModelVersion)); + } + + /** + * @param region (Mandatory for account-level) The region of the metastore. + * + * @return builder + * + */ + public Builder region(@Nullable Output region) { + $.region = region; + return this; + } + + /** + * @param region (Mandatory for account-level) The region of the metastore. + * + * @return builder + * + */ + public Builder region(String region) { + return region(Output.of(region)); + } + + /** + * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. + * + * @return builder + * + */ + public Builder storageRoot(@Nullable Output storageRoot) { + $.storageRoot = storageRoot; + return this; + } + + /** + * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. + * + * @return builder + * + */ + public Builder storageRoot(String storageRoot) { + return storageRoot(Output.of(storageRoot)); + } + + /** + * @param storageRootCredentialId ID of a storage credential used for the `storage_root`. + * + * @return builder + * + */ + public Builder storageRootCredentialId(@Nullable Output storageRootCredentialId) { + $.storageRootCredentialId = storageRootCredentialId; + return this; + } + + /** + * @param storageRootCredentialId ID of a storage credential used for the `storage_root`. + * + * @return builder + * + */ + public Builder storageRootCredentialId(String storageRootCredentialId) { + return storageRootCredentialId(Output.of(storageRootCredentialId)); + } + + /** + * @param storageRootCredentialName Name of a storage credential used for the `storage_root`. 
+ * + * @return builder + * + */ + public Builder storageRootCredentialName(@Nullable Output storageRootCredentialName) { + $.storageRootCredentialName = storageRootCredentialName; + return this; + } + + /** + * @param storageRootCredentialName Name of a storage credential used for the `storage_root`. + * + * @return builder + * + */ + public Builder storageRootCredentialName(String storageRootCredentialName) { + return storageRootCredentialName(Output.of(storageRootCredentialName)); + } + + /** + * @param updatedAt Timestamp (in milliseconds) when the current metastore was updated. + * + * @return builder + * + */ + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + /** + * @param updatedAt Timestamp (in milliseconds) when the current metastore was updated. + * + * @return builder + * + */ + public Builder updatedAt(Integer updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + + /** + * @param updatedBy the ID of the identity that updated the current metastore. + * + * @return builder + * + */ + public Builder updatedBy(@Nullable Output updatedBy) { + $.updatedBy = updatedBy; + return this; + } + + /** + * @param updatedBy the ID of the identity that updated the current metastore. + * + * @return builder + * + */ + public Builder updatedBy(String updatedBy) { + return updatedBy(Output.of(updatedBy)); + } + + public GetCurrentMetastoreMetastoreInfoArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastorePlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastorePlainArgs.java new file mode 100644 index 00000000..0a9a45e0 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetCurrentMetastorePlainArgs.java @@ -0,0 +1,100 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.GetCurrentMetastoreMetastoreInfo; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCurrentMetastorePlainArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCurrentMetastorePlainArgs Empty = new GetCurrentMetastorePlainArgs(); + + /** + * metastore ID. + * + */ + @Import(name="id") + private @Nullable String id; + + /** + * @return metastore ID. + * + */ + public Optional id() { + return Optional.ofNullable(this.id); + } + + /** + * summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + * + */ + @Import(name="metastoreInfo") + private @Nullable GetCurrentMetastoreMetastoreInfo metastoreInfo; + + /** + * @return summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + * + */ + public Optional metastoreInfo() { + return Optional.ofNullable(this.metastoreInfo); + } + + private GetCurrentMetastorePlainArgs() {} + + private GetCurrentMetastorePlainArgs(GetCurrentMetastorePlainArgs $) { + this.id = $.id; + this.metastoreInfo = $.metastoreInfo; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCurrentMetastorePlainArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCurrentMetastorePlainArgs $; + + public Builder() { + $ = new GetCurrentMetastorePlainArgs(); + } + + public Builder(GetCurrentMetastorePlainArgs defaults) { + $ = new GetCurrentMetastorePlainArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id metastore ID. + * + * @return builder + * + */ + public Builder id(@Nullable String id) { + $.id = id; + return this; + } + + /** + * @param metastoreInfo summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + * + * @return builder + * + */ + public Builder metastoreInfo(@Nullable GetCurrentMetastoreMetastoreInfo metastoreInfo) { + $.metastoreInfo = metastoreInfo; + return this; + } + + public GetCurrentMetastorePlainArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfo.java index 03af9548..4be16cd5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfo.java @@ -155,14 +155,14 @@ public Optional region() { } /** - * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * Path on cloud storage account, where managed `databricks.Table` are stored. * */ @Import(name="storageRoot") private @Nullable String storageRoot; /** - * @return Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * @return Path on cloud storage account, where managed `databricks.Table` are stored. * */ public Optional storageRoot() { @@ -340,7 +340,7 @@ public Builder region(@Nullable String region) { } /** - * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfoArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfoArgs.java index 1761eeab..5adce1c1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfoArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetMetastoreMetastoreInfoArgs.java @@ -156,14 +156,14 @@ public Optional> region() { } /** - * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * Path on cloud storage account, where managed `databricks.Table` are stored. * */ @Import(name="storageRoot") private @Nullable Output storageRoot; /** - * @return Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * @return Path on cloud storage account, where managed `databricks.Table` are stored. * */ public Optional> storageRoot() { @@ -429,7 +429,7 @@ public Builder region(String region) { } /** - * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. * * @return builder * @@ -440,7 +440,7 @@ public Builder storageRoot(@Nullable Output storageRoot) { } /** - * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * @param storageRoot Path on cloud storage account, where managed `databricks.Table` are stored. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java index c4e6acac..08b838b2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java @@ -62,14 +62,14 @@ public Optional> applicationId() { } /** - * Display name of the service principal, e.g. `Foo SPN`. + * Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. * */ @Import(name="displayName") private @Nullable Output displayName; /** - * @return Display name of the service principal, e.g. `Foo SPN`. + * @return Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. * */ public Optional> displayName() { @@ -239,7 +239,7 @@ public Builder applicationId(String applicationId) { } /** - * @param displayName Display name of the service principal, e.g. `Foo SPN`. + * @param displayName Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. * * @return builder * @@ -250,7 +250,7 @@ public Builder displayName(@Nullable Output displayName) { } /** - * @param displayName Display name of the service principal, e.g. `Foo SPN`. + * @param displayName Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java index 3e1010e3..b3076ae1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java @@ -61,14 +61,14 @@ public Optional applicationId() { } /** - * Display name of the service principal, e.g. `Foo SPN`. + * Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. * */ @Import(name="displayName") private @Nullable String displayName; /** - * @return Display name of the service principal, e.g. `Foo SPN`. + * @return Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. * */ public Optional displayName() { @@ -208,7 +208,7 @@ public Builder applicationId(@Nullable String applicationId) { } /** - * @param displayName Display name of the service principal, e.g. `Foo SPN`. + * @param displayName Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseArgs.java index 26ea061e..f1c69433 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseArgs.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.GetSqlWarehouseChannelArgs; +import com.pulumi.databricks.inputs.GetSqlWarehouseHealthArgs; import com.pulumi.databricks.inputs.GetSqlWarehouseOdbcParamsArgs; import com.pulumi.databricks.inputs.GetSqlWarehouseTagsArgs; import java.lang.Boolean; @@ -65,6 +66,21 @@ public Optional> clusterSize() { return Optional.ofNullable(this.clusterSize); } + /** + * The username of the user who created the endpoint. + * + */ + @Import(name="creatorName") + private @Nullable Output creatorName; + + /** + * @return The username of the user who created the endpoint. + * + */ + public Optional> creatorName() { + return Optional.ofNullable(this.creatorName); + } + /** * ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. * @@ -110,6 +126,21 @@ public Optional> enableServerlessCompute() { return Optional.ofNullable(this.enableServerlessCompute); } + /** + * Health status of the endpoint. + * + */ + @Import(name="health") + private @Nullable Output health; + + /** + * @return Health status of the endpoint. + * + */ + public Optional> health() { + return Optional.ofNullable(this.health); + } + /** * The ID of the SQL warehouse. * @@ -192,9 +223,32 @@ public Optional> name() { return Optional.ofNullable(this.name); } + /** + * The current number of clusters used by the endpoint. 
+ * + */ + @Import(name="numActiveSessions") + private @Nullable Output numActiveSessions; + + /** + * @return The current number of clusters used by the endpoint. + * + */ + public Optional> numActiveSessions() { + return Optional.ofNullable(this.numActiveSessions); + } + + /** + * The current number of clusters used by the endpoint. + * + */ @Import(name="numClusters") private @Nullable Output numClusters; + /** + * @return The current number of clusters used by the endpoint. + * + */ public Optional> numClusters() { return Optional.ofNullable(this.numClusters); } @@ -229,9 +283,17 @@ public Optional> spotInstancePolicy() { return Optional.ofNullable(this.spotInstancePolicy); } + /** + * The current state of the endpoint. + * + */ @Import(name="state") private @Nullable Output state; + /** + * @return The current state of the endpoint. + * + */ public Optional> state() { return Optional.ofNullable(this.state); } @@ -251,26 +313,45 @@ public Optional> tags() { return Optional.ofNullable(this.tags); } + /** + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + */ + @Import(name="warehouseType") + private @Nullable Output warehouseType; + + /** + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). 
+ * + */ + public Optional> warehouseType() { + return Optional.ofNullable(this.warehouseType); + } + private GetSqlWarehouseArgs() {} private GetSqlWarehouseArgs(GetSqlWarehouseArgs $) { this.autoStopMins = $.autoStopMins; this.channel = $.channel; this.clusterSize = $.clusterSize; + this.creatorName = $.creatorName; this.dataSourceId = $.dataSourceId; this.enablePhoton = $.enablePhoton; this.enableServerlessCompute = $.enableServerlessCompute; + this.health = $.health; this.id = $.id; this.instanceProfileArn = $.instanceProfileArn; this.jdbcUrl = $.jdbcUrl; this.maxNumClusters = $.maxNumClusters; this.minNumClusters = $.minNumClusters; this.name = $.name; + this.numActiveSessions = $.numActiveSessions; this.numClusters = $.numClusters; this.odbcParams = $.odbcParams; this.spotInstancePolicy = $.spotInstancePolicy; this.state = $.state; this.tags = $.tags; + this.warehouseType = $.warehouseType; } public static Builder builder() { @@ -354,6 +435,27 @@ public Builder clusterSize(String clusterSize) { return clusterSize(Output.of(clusterSize)); } + /** + * @param creatorName The username of the user who created the endpoint. + * + * @return builder + * + */ + public Builder creatorName(@Nullable Output creatorName) { + $.creatorName = creatorName; + return this; + } + + /** + * @param creatorName The username of the user who created the endpoint. + * + * @return builder + * + */ + public Builder creatorName(String creatorName) { + return creatorName(Output.of(creatorName)); + } + /** * @param dataSourceId ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. * @@ -417,6 +519,27 @@ public Builder enableServerlessCompute(Boolean enableServerlessCompute) { return enableServerlessCompute(Output.of(enableServerlessCompute)); } + /** + * @param health Health status of the endpoint. 
+ * + * @return builder + * + */ + public Builder health(@Nullable Output health) { + $.health = health; + return this; + } + + /** + * @param health Health status of the endpoint. + * + * @return builder + * + */ + public Builder health(GetSqlWarehouseHealthArgs health) { + return health(Output.of(health)); + } + /** * @param id The ID of the SQL warehouse. * @@ -531,11 +654,44 @@ public Builder name(String name) { return name(Output.of(name)); } + /** + * @param numActiveSessions The current number of clusters used by the endpoint. + * + * @return builder + * + */ + public Builder numActiveSessions(@Nullable Output numActiveSessions) { + $.numActiveSessions = numActiveSessions; + return this; + } + + /** + * @param numActiveSessions The current number of clusters used by the endpoint. + * + * @return builder + * + */ + public Builder numActiveSessions(Integer numActiveSessions) { + return numActiveSessions(Output.of(numActiveSessions)); + } + + /** + * @param numClusters The current number of clusters used by the endpoint. + * + * @return builder + * + */ public Builder numClusters(@Nullable Output numClusters) { $.numClusters = numClusters; return this; } + /** + * @param numClusters The current number of clusters used by the endpoint. + * + * @return builder + * + */ public Builder numClusters(Integer numClusters) { return numClusters(Output.of(numClusters)); } @@ -582,11 +738,23 @@ public Builder spotInstancePolicy(String spotInstancePolicy) { return spotInstancePolicy(Output.of(spotInstancePolicy)); } + /** + * @param state The current state of the endpoint. + * + * @return builder + * + */ public Builder state(@Nullable Output state) { $.state = state; return this; } + /** + * @param state The current state of the endpoint. 
+ * + * @return builder + * + */ public Builder state(String state) { return state(Output.of(state)); } @@ -612,6 +780,27 @@ public Builder tags(GetSqlWarehouseTagsArgs tags) { return tags(Output.of(tags)); } + /** + * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + * @return builder + * + */ + public Builder warehouseType(@Nullable Output warehouseType) { + $.warehouseType = warehouseType; + return this; + } + + /** + * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + * @return builder + * + */ + public Builder warehouseType(String warehouseType) { + return warehouseType(Output.of(warehouseType)); + } + public GetSqlWarehouseArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannel.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannel.java index 34a1a46f..c76566a9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannel.java @@ -14,6 +14,13 @@ public final class GetSqlWarehouseChannel extends com.pulumi.resources.InvokeArg public static final GetSqlWarehouseChannel Empty = new GetSqlWarehouseChannel(); + @Import(name="dbsqlVersion") + private @Nullable String dbsqlVersion; + + public Optional dbsqlVersion() { + return Optional.ofNullable(this.dbsqlVersion); + } + /** * Name of the SQL warehouse to search (case-sensitive). 
* @@ -32,6 +39,7 @@ public Optional name() { private GetSqlWarehouseChannel() {} private GetSqlWarehouseChannel(GetSqlWarehouseChannel $) { + this.dbsqlVersion = $.dbsqlVersion; this.name = $.name; } @@ -53,6 +61,11 @@ public Builder(GetSqlWarehouseChannel defaults) { $ = new GetSqlWarehouseChannel(Objects.requireNonNull(defaults)); } + public Builder dbsqlVersion(@Nullable String dbsqlVersion) { + $.dbsqlVersion = dbsqlVersion; + return this; + } + /** * @param name Name of the SQL warehouse to search (case-sensitive). * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannelArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannelArgs.java index 4066766d..fd67f562 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannelArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseChannelArgs.java @@ -15,6 +15,13 @@ public final class GetSqlWarehouseChannelArgs extends com.pulumi.resources.Resou public static final GetSqlWarehouseChannelArgs Empty = new GetSqlWarehouseChannelArgs(); + @Import(name="dbsqlVersion") + private @Nullable Output dbsqlVersion; + + public Optional> dbsqlVersion() { + return Optional.ofNullable(this.dbsqlVersion); + } + /** * Name of the SQL warehouse to search (case-sensitive). * @@ -33,6 +40,7 @@ public Optional> name() { private GetSqlWarehouseChannelArgs() {} private GetSqlWarehouseChannelArgs(GetSqlWarehouseChannelArgs $) { + this.dbsqlVersion = $.dbsqlVersion; this.name = $.name; } @@ -54,6 +62,15 @@ public Builder(GetSqlWarehouseChannelArgs defaults) { $ = new GetSqlWarehouseChannelArgs(Objects.requireNonNull(defaults)); } + public Builder dbsqlVersion(@Nullable Output dbsqlVersion) { + $.dbsqlVersion = dbsqlVersion; + return this; + } + + public Builder dbsqlVersion(String dbsqlVersion) { + return dbsqlVersion(Output.of(dbsqlVersion)); + } + /** * @param name Name of the SQL warehouse to search (case-sensitive). 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealth.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealth.java new file mode 100644 index 00000000..4d446fbe --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealth.java @@ -0,0 +1,111 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.GetSqlWarehouseHealthFailureReason; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetSqlWarehouseHealth extends com.pulumi.resources.InvokeArgs { + + public static final GetSqlWarehouseHealth Empty = new GetSqlWarehouseHealth(); + + @Import(name="details") + private @Nullable String details; + + public Optional details() { + return Optional.ofNullable(this.details); + } + + @Import(name="failureReason") + private @Nullable GetSqlWarehouseHealthFailureReason failureReason; + + public Optional failureReason() { + return Optional.ofNullable(this.failureReason); + } + + @Import(name="message") + private @Nullable String message; + + public Optional message() { + return Optional.ofNullable(this.message); + } + + @Import(name="status") + private @Nullable String status; + + public Optional status() { + return Optional.ofNullable(this.status); + } + + @Import(name="summary") + private @Nullable String summary; + + public Optional summary() { + return Optional.ofNullable(this.summary); + } + + private GetSqlWarehouseHealth() {} + + private GetSqlWarehouseHealth(GetSqlWarehouseHealth $) { + this.details = $.details; + this.failureReason = $.failureReason; + this.message = $.message; + this.status = $.status; + this.summary = $.summary; + } + + public static Builder builder() { + 
return new Builder(); + } + public static Builder builder(GetSqlWarehouseHealth defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetSqlWarehouseHealth $; + + public Builder() { + $ = new GetSqlWarehouseHealth(); + } + + public Builder(GetSqlWarehouseHealth defaults) { + $ = new GetSqlWarehouseHealth(Objects.requireNonNull(defaults)); + } + + public Builder details(@Nullable String details) { + $.details = details; + return this; + } + + public Builder failureReason(@Nullable GetSqlWarehouseHealthFailureReason failureReason) { + $.failureReason = failureReason; + return this; + } + + public Builder message(@Nullable String message) { + $.message = message; + return this; + } + + public Builder status(@Nullable String status) { + $.status = status; + return this; + } + + public Builder summary(@Nullable String summary) { + $.summary = summary; + return this; + } + + public GetSqlWarehouseHealth build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthArgs.java new file mode 100644 index 00000000..85a173b7 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthArgs.java @@ -0,0 +1,132 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.GetSqlWarehouseHealthFailureReasonArgs; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetSqlWarehouseHealthArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetSqlWarehouseHealthArgs Empty = new GetSqlWarehouseHealthArgs(); + + @Import(name="details") + private @Nullable Output details; + + public Optional> details() { + return Optional.ofNullable(this.details); + } + + @Import(name="failureReason") + private @Nullable Output failureReason; + + public Optional> failureReason() { + return Optional.ofNullable(this.failureReason); + } + + @Import(name="message") + private @Nullable Output message; + + public Optional> message() { + return Optional.ofNullable(this.message); + } + + @Import(name="status") + private @Nullable Output status; + + public Optional> status() { + return Optional.ofNullable(this.status); + } + + @Import(name="summary") + private @Nullable Output summary; + + public Optional> summary() { + return Optional.ofNullable(this.summary); + } + + private GetSqlWarehouseHealthArgs() {} + + private GetSqlWarehouseHealthArgs(GetSqlWarehouseHealthArgs $) { + this.details = $.details; + this.failureReason = $.failureReason; + this.message = $.message; + this.status = $.status; + this.summary = $.summary; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetSqlWarehouseHealthArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetSqlWarehouseHealthArgs $; + + public Builder() { + $ = new GetSqlWarehouseHealthArgs(); + } + + public Builder(GetSqlWarehouseHealthArgs defaults) { + $ = new GetSqlWarehouseHealthArgs(Objects.requireNonNull(defaults)); + } + + public Builder details(@Nullable 
Output details) { + $.details = details; + return this; + } + + public Builder details(String details) { + return details(Output.of(details)); + } + + public Builder failureReason(@Nullable Output failureReason) { + $.failureReason = failureReason; + return this; + } + + public Builder failureReason(GetSqlWarehouseHealthFailureReasonArgs failureReason) { + return failureReason(Output.of(failureReason)); + } + + public Builder message(@Nullable Output message) { + $.message = message; + return this; + } + + public Builder message(String message) { + return message(Output.of(message)); + } + + public Builder status(@Nullable Output status) { + $.status = status; + return this; + } + + public Builder status(String status) { + return status(Output.of(status)); + } + + public Builder summary(@Nullable Output summary) { + $.summary = summary; + return this; + } + + public Builder summary(String summary) { + return summary(Output.of(summary)); + } + + public GetSqlWarehouseHealthArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReason.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReason.java new file mode 100644 index 00000000..3bda8942 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReason.java @@ -0,0 +1,86 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetSqlWarehouseHealthFailureReason extends com.pulumi.resources.InvokeArgs { + + public static final GetSqlWarehouseHealthFailureReason Empty = new GetSqlWarehouseHealthFailureReason(); + + @Import(name="code") + private @Nullable String code; + + public Optional code() { + return Optional.ofNullable(this.code); + } + + @Import(name="parameters") + private @Nullable Map parameters; + + public Optional> parameters() { + return Optional.ofNullable(this.parameters); + } + + @Import(name="type") + private @Nullable String type; + + public Optional type() { + return Optional.ofNullable(this.type); + } + + private GetSqlWarehouseHealthFailureReason() {} + + private GetSqlWarehouseHealthFailureReason(GetSqlWarehouseHealthFailureReason $) { + this.code = $.code; + this.parameters = $.parameters; + this.type = $.type; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetSqlWarehouseHealthFailureReason defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetSqlWarehouseHealthFailureReason $; + + public Builder() { + $ = new GetSqlWarehouseHealthFailureReason(); + } + + public Builder(GetSqlWarehouseHealthFailureReason defaults) { + $ = new GetSqlWarehouseHealthFailureReason(Objects.requireNonNull(defaults)); + } + + public Builder code(@Nullable String code) { + $.code = code; + return this; + } + + public Builder parameters(@Nullable Map parameters) { + $.parameters = parameters; + return this; + } + + public Builder type(@Nullable String type) { + $.type = type; + return this; + } + + public GetSqlWarehouseHealthFailureReason build() { + return $; + } + } + +} diff --git 
a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReasonArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReasonArgs.java new file mode 100644 index 00000000..61b53681 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseHealthFailureReasonArgs.java @@ -0,0 +1,99 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetSqlWarehouseHealthFailureReasonArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetSqlWarehouseHealthFailureReasonArgs Empty = new GetSqlWarehouseHealthFailureReasonArgs(); + + @Import(name="code") + private @Nullable Output code; + + public Optional> code() { + return Optional.ofNullable(this.code); + } + + @Import(name="parameters") + private @Nullable Output> parameters; + + public Optional>> parameters() { + return Optional.ofNullable(this.parameters); + } + + @Import(name="type") + private @Nullable Output type; + + public Optional> type() { + return Optional.ofNullable(this.type); + } + + private GetSqlWarehouseHealthFailureReasonArgs() {} + + private GetSqlWarehouseHealthFailureReasonArgs(GetSqlWarehouseHealthFailureReasonArgs $) { + this.code = $.code; + this.parameters = $.parameters; + this.type = $.type; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetSqlWarehouseHealthFailureReasonArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetSqlWarehouseHealthFailureReasonArgs $; + + public Builder() { + $ = new 
GetSqlWarehouseHealthFailureReasonArgs(); + } + + public Builder(GetSqlWarehouseHealthFailureReasonArgs defaults) { + $ = new GetSqlWarehouseHealthFailureReasonArgs(Objects.requireNonNull(defaults)); + } + + public Builder code(@Nullable Output code) { + $.code = code; + return this; + } + + public Builder code(String code) { + return code(Output.of(code)); + } + + public Builder parameters(@Nullable Output> parameters) { + $.parameters = parameters; + return this; + } + + public Builder parameters(Map parameters) { + return parameters(Output.of(parameters)); + } + + public Builder type(@Nullable Output type) { + $.type = type; + return this; + } + + public Builder type(String type) { + return type(Output.of(type)); + } + + public GetSqlWarehouseHealthFailureReasonArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParams.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParams.java index 9bcc19c5..b31820de 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParams.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParams.java @@ -4,7 +4,6 @@ package com.pulumi.databricks.inputs; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.Integer; import java.lang.String; import java.util.Objects; @@ -16,13 +15,6 @@ public final class GetSqlWarehouseOdbcParams extends com.pulumi.resources.Invoke public static final GetSqlWarehouseOdbcParams Empty = new GetSqlWarehouseOdbcParams(); - @Import(name="host") - private @Nullable String host; - - public Optional host() { - return Optional.ofNullable(this.host); - } - @Import(name="hostname") private @Nullable String hostname; @@ -30,31 +22,30 @@ public Optional hostname() { return Optional.ofNullable(this.hostname); } - @Import(name="path", required=true) - private String path; + 
@Import(name="path") + private @Nullable String path; - public String path() { - return this.path; + public Optional path() { + return Optional.ofNullable(this.path); } - @Import(name="port", required=true) - private Integer port; + @Import(name="port") + private @Nullable Integer port; - public Integer port() { - return this.port; + public Optional port() { + return Optional.ofNullable(this.port); } - @Import(name="protocol", required=true) - private String protocol; + @Import(name="protocol") + private @Nullable String protocol; - public String protocol() { - return this.protocol; + public Optional protocol() { + return Optional.ofNullable(this.protocol); } private GetSqlWarehouseOdbcParams() {} private GetSqlWarehouseOdbcParams(GetSqlWarehouseOdbcParams $) { - this.host = $.host; this.hostname = $.hostname; this.path = $.path; this.port = $.port; @@ -79,41 +70,27 @@ public Builder(GetSqlWarehouseOdbcParams defaults) { $ = new GetSqlWarehouseOdbcParams(Objects.requireNonNull(defaults)); } - public Builder host(@Nullable String host) { - $.host = host; - return this; - } - public Builder hostname(@Nullable String hostname) { $.hostname = hostname; return this; } - public Builder path(String path) { + public Builder path(@Nullable String path) { $.path = path; return this; } - public Builder port(Integer port) { + public Builder port(@Nullable Integer port) { $.port = port; return this; } - public Builder protocol(String protocol) { + public Builder protocol(@Nullable String protocol) { $.protocol = protocol; return this; } public GetSqlWarehouseOdbcParams build() { - if ($.path == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParams", "path"); - } - if ($.port == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParams", "port"); - } - if ($.protocol == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParams", "protocol"); - } return $; } } diff --git 
a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParamsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParamsArgs.java index 0a3cf81a..29052930 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParamsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseOdbcParamsArgs.java @@ -5,7 +5,6 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.Integer; import java.lang.String; import java.util.Objects; @@ -17,13 +16,6 @@ public final class GetSqlWarehouseOdbcParamsArgs extends com.pulumi.resources.Re public static final GetSqlWarehouseOdbcParamsArgs Empty = new GetSqlWarehouseOdbcParamsArgs(); - @Import(name="host") - private @Nullable Output host; - - public Optional> host() { - return Optional.ofNullable(this.host); - } - @Import(name="hostname") private @Nullable Output hostname; @@ -31,31 +23,30 @@ public Optional> hostname() { return Optional.ofNullable(this.hostname); } - @Import(name="path", required=true) - private Output path; + @Import(name="path") + private @Nullable Output path; - public Output path() { - return this.path; + public Optional> path() { + return Optional.ofNullable(this.path); } - @Import(name="port", required=true) - private Output port; + @Import(name="port") + private @Nullable Output port; - public Output port() { - return this.port; + public Optional> port() { + return Optional.ofNullable(this.port); } - @Import(name="protocol", required=true) - private Output protocol; + @Import(name="protocol") + private @Nullable Output protocol; - public Output protocol() { - return this.protocol; + public Optional> protocol() { + return Optional.ofNullable(this.protocol); } private GetSqlWarehouseOdbcParamsArgs() {} private GetSqlWarehouseOdbcParamsArgs(GetSqlWarehouseOdbcParamsArgs $) { - this.host = $.host; this.hostname = 
$.hostname; this.path = $.path; this.port = $.port; @@ -80,15 +71,6 @@ public Builder(GetSqlWarehouseOdbcParamsArgs defaults) { $ = new GetSqlWarehouseOdbcParamsArgs(Objects.requireNonNull(defaults)); } - public Builder host(@Nullable Output host) { - $.host = host; - return this; - } - - public Builder host(String host) { - return host(Output.of(host)); - } - public Builder hostname(@Nullable Output hostname) { $.hostname = hostname; return this; @@ -98,7 +80,7 @@ public Builder hostname(String hostname) { return hostname(Output.of(hostname)); } - public Builder path(Output path) { + public Builder path(@Nullable Output path) { $.path = path; return this; } @@ -107,7 +89,7 @@ public Builder path(String path) { return path(Output.of(path)); } - public Builder port(Output port) { + public Builder port(@Nullable Output port) { $.port = port; return this; } @@ -116,7 +98,7 @@ public Builder port(Integer port) { return port(Output.of(port)); } - public Builder protocol(Output protocol) { + public Builder protocol(@Nullable Output protocol) { $.protocol = protocol; return this; } @@ -126,15 +108,6 @@ public Builder protocol(String protocol) { } public GetSqlWarehouseOdbcParamsArgs build() { - if ($.path == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParamsArgs", "path"); - } - if ($.port == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParamsArgs", "port"); - } - if ($.protocol == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParamsArgs", "protocol"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehousePlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehousePlainArgs.java index 0d2478e8..72867a2f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehousePlainArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehousePlainArgs.java @@ -5,6 +5,7 @@ import 
com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.GetSqlWarehouseChannel; +import com.pulumi.databricks.inputs.GetSqlWarehouseHealth; import com.pulumi.databricks.inputs.GetSqlWarehouseOdbcParams; import com.pulumi.databricks.inputs.GetSqlWarehouseTags; import java.lang.Boolean; @@ -64,6 +65,21 @@ public Optional clusterSize() { return Optional.ofNullable(this.clusterSize); } + /** + * The username of the user who created the endpoint. + * + */ + @Import(name="creatorName") + private @Nullable String creatorName; + + /** + * @return The username of the user who created the endpoint. + * + */ + public Optional creatorName() { + return Optional.ofNullable(this.creatorName); + } + /** * ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. * @@ -109,6 +125,21 @@ public Optional enableServerlessCompute() { return Optional.ofNullable(this.enableServerlessCompute); } + /** + * Health status of the endpoint. + * + */ + @Import(name="health") + private @Nullable GetSqlWarehouseHealth health; + + /** + * @return Health status of the endpoint. + * + */ + public Optional health() { + return Optional.ofNullable(this.health); + } + /** * The ID of the SQL warehouse. * @@ -191,9 +222,32 @@ public Optional name() { return Optional.ofNullable(this.name); } + /** + * The current number of clusters used by the endpoint. + * + */ + @Import(name="numActiveSessions") + private @Nullable Integer numActiveSessions; + + /** + * @return The current number of clusters used by the endpoint. + * + */ + public Optional numActiveSessions() { + return Optional.ofNullable(this.numActiveSessions); + } + + /** + * The current number of clusters used by the endpoint. + * + */ @Import(name="numClusters") private @Nullable Integer numClusters; + /** + * @return The current number of clusters used by the endpoint. 
+ * + */ public Optional numClusters() { return Optional.ofNullable(this.numClusters); } @@ -228,9 +282,17 @@ public Optional spotInstancePolicy() { return Optional.ofNullable(this.spotInstancePolicy); } + /** + * The current state of the endpoint. + * + */ @Import(name="state") private @Nullable String state; + /** + * @return The current state of the endpoint. + * + */ public Optional state() { return Optional.ofNullable(this.state); } @@ -250,26 +312,45 @@ public Optional tags() { return Optional.ofNullable(this.tags); } + /** + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + */ + @Import(name="warehouseType") + private @Nullable String warehouseType; + + /** + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + */ + public Optional warehouseType() { + return Optional.ofNullable(this.warehouseType); + } + private GetSqlWarehousePlainArgs() {} private GetSqlWarehousePlainArgs(GetSqlWarehousePlainArgs $) { this.autoStopMins = $.autoStopMins; this.channel = $.channel; this.clusterSize = $.clusterSize; + this.creatorName = $.creatorName; this.dataSourceId = $.dataSourceId; this.enablePhoton = $.enablePhoton; this.enableServerlessCompute = $.enableServerlessCompute; + this.health = $.health; this.id = $.id; this.instanceProfileArn = $.instanceProfileArn; this.jdbcUrl = $.jdbcUrl; this.maxNumClusters = $.maxNumClusters; this.minNumClusters = $.minNumClusters; this.name = $.name; + this.numActiveSessions = $.numActiveSessions; this.numClusters = $.numClusters; this.odbcParams = $.odbcParams; this.spotInstancePolicy = $.spotInstancePolicy; this.state = $.state; this.tags = $.tags; + this.warehouseType = $.warehouseType; } public static Builder builder() { @@ -323,6 +404,17 @@ public Builder 
clusterSize(@Nullable String clusterSize) { return this; } + /** + * @param creatorName The username of the user who created the endpoint. + * + * @return builder + * + */ + public Builder creatorName(@Nullable String creatorName) { + $.creatorName = creatorName; + return this; + } + /** * @param dataSourceId ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. * @@ -356,6 +448,17 @@ public Builder enableServerlessCompute(@Nullable Boolean enableServerlessCompute return this; } + /** + * @param health Health status of the endpoint. + * + * @return builder + * + */ + public Builder health(@Nullable GetSqlWarehouseHealth health) { + $.health = health; + return this; + } + /** * @param id The ID of the SQL warehouse. * @@ -416,6 +519,23 @@ public Builder name(@Nullable String name) { return this; } + /** + * @param numActiveSessions The current number of clusters used by the endpoint. + * + * @return builder + * + */ + public Builder numActiveSessions(@Nullable Integer numActiveSessions) { + $.numActiveSessions = numActiveSessions; + return this; + } + + /** + * @param numClusters The current number of clusters used by the endpoint. + * + * @return builder + * + */ public Builder numClusters(@Nullable Integer numClusters) { $.numClusters = numClusters; return this; @@ -443,6 +563,12 @@ public Builder spotInstancePolicy(@Nullable String spotInstancePolicy) { return this; } + /** + * @param state The current state of the endpoint. + * + * @return builder + * + */ public Builder state(@Nullable String state) { $.state = state; return this; @@ -459,6 +585,17 @@ public Builder tags(@Nullable GetSqlWarehouseTags tags) { return this; } + /** + * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). 
+ * + * @return builder + * + */ + public Builder warehouseType(@Nullable String warehouseType) { + $.warehouseType = warehouseType; + return this; + } + public GetSqlWarehousePlainArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTags.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTags.java index 253de9b5..5d9f10d8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTags.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTags.java @@ -5,20 +5,21 @@ import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.GetSqlWarehouseTagsCustomTag; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.util.List; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class GetSqlWarehouseTags extends com.pulumi.resources.InvokeArgs { public static final GetSqlWarehouseTags Empty = new GetSqlWarehouseTags(); - @Import(name="customTags", required=true) - private List customTags; + @Import(name="customTags") + private @Nullable List customTags; - public List customTags() { - return this.customTags; + public Optional> customTags() { + return Optional.ofNullable(this.customTags); } private GetSqlWarehouseTags() {} @@ -45,7 +46,7 @@ public Builder(GetSqlWarehouseTags defaults) { $ = new GetSqlWarehouseTags(Objects.requireNonNull(defaults)); } - public Builder customTags(List customTags) { + public Builder customTags(@Nullable List customTags) { $.customTags = customTags; return this; } @@ -55,9 +56,6 @@ public Builder customTags(GetSqlWarehouseTagsCustomTag... 
customTags) { } public GetSqlWarehouseTags build() { - if ($.customTags == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTags", "customTags"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsArgs.java index c07d30f1..78212598 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsArgs.java @@ -6,20 +6,21 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.GetSqlWarehouseTagsCustomTagArgs; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.util.List; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class GetSqlWarehouseTagsArgs extends com.pulumi.resources.ResourceArgs { public static final GetSqlWarehouseTagsArgs Empty = new GetSqlWarehouseTagsArgs(); - @Import(name="customTags", required=true) - private Output> customTags; + @Import(name="customTags") + private @Nullable Output> customTags; - public Output> customTags() { - return this.customTags; + public Optional>> customTags() { + return Optional.ofNullable(this.customTags); } private GetSqlWarehouseTagsArgs() {} @@ -46,7 +47,7 @@ public Builder(GetSqlWarehouseTagsArgs defaults) { $ = new GetSqlWarehouseTagsArgs(Objects.requireNonNull(defaults)); } - public Builder customTags(Output> customTags) { + public Builder customTags(@Nullable Output> customTags) { $.customTags = customTags; return this; } @@ -60,9 +61,6 @@ public Builder customTags(GetSqlWarehouseTagsCustomTagArgs... 
customTags) { } public GetSqlWarehouseTagsArgs build() { - if ($.customTags == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsArgs", "customTags"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTag.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTag.java index 8cebd068..5a35e0f0 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTag.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTag.java @@ -4,27 +4,28 @@ package com.pulumi.databricks.inputs; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class GetSqlWarehouseTagsCustomTag extends com.pulumi.resources.InvokeArgs { public static final GetSqlWarehouseTagsCustomTag Empty = new GetSqlWarehouseTagsCustomTag(); - @Import(name="key", required=true) - private String key; + @Import(name="key") + private @Nullable String key; - public String key() { - return this.key; + public Optional key() { + return Optional.ofNullable(this.key); } - @Import(name="value", required=true) - private String value; + @Import(name="value") + private @Nullable String value; - public String value() { - return this.value; + public Optional value() { + return Optional.ofNullable(this.value); } private GetSqlWarehouseTagsCustomTag() {} @@ -52,23 +53,17 @@ public Builder(GetSqlWarehouseTagsCustomTag defaults) { $ = new GetSqlWarehouseTagsCustomTag(Objects.requireNonNull(defaults)); } - public Builder key(String key) { + public Builder key(@Nullable String key) { $.key = key; return this; } - public Builder value(String value) { + public Builder value(@Nullable String value) { $.value = value; return this; } public GetSqlWarehouseTagsCustomTag build() { - if ($.key == 
null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsCustomTag", "key"); - } - if ($.value == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsCustomTag", "value"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTagArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTagArgs.java index 96b2cb87..e427917b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTagArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetSqlWarehouseTagsCustomTagArgs.java @@ -5,27 +5,28 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class GetSqlWarehouseTagsCustomTagArgs extends com.pulumi.resources.ResourceArgs { public static final GetSqlWarehouseTagsCustomTagArgs Empty = new GetSqlWarehouseTagsCustomTagArgs(); - @Import(name="key", required=true) - private Output key; + @Import(name="key") + private @Nullable Output key; - public Output key() { - return this.key; + public Optional> key() { + return Optional.ofNullable(this.key); } - @Import(name="value", required=true) - private Output value; + @Import(name="value") + private @Nullable Output value; - public Output value() { - return this.value; + public Optional> value() { + return Optional.ofNullable(this.value); } private GetSqlWarehouseTagsCustomTagArgs() {} @@ -53,7 +54,7 @@ public Builder(GetSqlWarehouseTagsCustomTagArgs defaults) { $ = new GetSqlWarehouseTagsCustomTagArgs(Objects.requireNonNull(defaults)); } - public Builder key(Output key) { + public Builder key(@Nullable Output key) { $.key = key; return this; } @@ -62,7 +63,7 @@ public Builder key(String key) { return key(Output.of(key)); } - public Builder value(Output 
value) { + public Builder value(@Nullable Output value) { $.value = value; return this; } @@ -72,12 +73,6 @@ public Builder value(String value) { } public GetSqlWarehouseTagsCustomTagArgs build() { - if ($.key == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsCustomTagArgs", "key"); - } - if ($.value == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsCustomTagArgs", "value"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantState.java new file mode 100644 index 00000000..ed252fc3 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantState.java @@ -0,0 +1,306 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GrantState extends com.pulumi.resources.ResourceArgs { + + public static final GrantState Empty = new GrantState(); + + @Import(name="catalog") + private @Nullable Output catalog; + + public Optional> catalog() { + return Optional.ofNullable(this.catalog); + } + + @Import(name="externalLocation") + private @Nullable Output externalLocation; + + public Optional> externalLocation() { + return Optional.ofNullable(this.externalLocation); + } + + @Import(name="foreignConnection") + private @Nullable Output foreignConnection; + + public Optional> foreignConnection() { + return Optional.ofNullable(this.foreignConnection); + } + + @Import(name="function") + private @Nullable Output function; + + public Optional> function() { + return Optional.ofNullable(this.function); + } + + @Import(name="metastore") + private 
@Nullable Output metastore; + + public Optional> metastore() { + return Optional.ofNullable(this.metastore); + } + + @Import(name="model") + private @Nullable Output model; + + public Optional> model() { + return Optional.ofNullable(this.model); + } + + @Import(name="pipeline") + private @Nullable Output pipeline; + + public Optional> pipeline() { + return Optional.ofNullable(this.pipeline); + } + + @Import(name="principal") + private @Nullable Output principal; + + public Optional> principal() { + return Optional.ofNullable(this.principal); + } + + @Import(name="privileges") + private @Nullable Output> privileges; + + public Optional>> privileges() { + return Optional.ofNullable(this.privileges); + } + + @Import(name="recipient") + private @Nullable Output recipient; + + public Optional> recipient() { + return Optional.ofNullable(this.recipient); + } + + @Import(name="schema") + private @Nullable Output schema; + + public Optional> schema() { + return Optional.ofNullable(this.schema); + } + + @Import(name="share") + private @Nullable Output share; + + public Optional> share() { + return Optional.ofNullable(this.share); + } + + @Import(name="storageCredential") + private @Nullable Output storageCredential; + + public Optional> storageCredential() { + return Optional.ofNullable(this.storageCredential); + } + + @Import(name="table") + private @Nullable Output table; + + public Optional> table() { + return Optional.ofNullable(this.table); + } + + @Import(name="volume") + private @Nullable Output volume; + + public Optional> volume() { + return Optional.ofNullable(this.volume); + } + + private GrantState() {} + + private GrantState(GrantState $) { + this.catalog = $.catalog; + this.externalLocation = $.externalLocation; + this.foreignConnection = $.foreignConnection; + this.function = $.function; + this.metastore = $.metastore; + this.model = $.model; + this.pipeline = $.pipeline; + this.principal = $.principal; + this.privileges = $.privileges; + this.recipient = 
$.recipient; + this.schema = $.schema; + this.share = $.share; + this.storageCredential = $.storageCredential; + this.table = $.table; + this.volume = $.volume; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GrantState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GrantState $; + + public Builder() { + $ = new GrantState(); + } + + public Builder(GrantState defaults) { + $ = new GrantState(Objects.requireNonNull(defaults)); + } + + public Builder catalog(@Nullable Output catalog) { + $.catalog = catalog; + return this; + } + + public Builder catalog(String catalog) { + return catalog(Output.of(catalog)); + } + + public Builder externalLocation(@Nullable Output externalLocation) { + $.externalLocation = externalLocation; + return this; + } + + public Builder externalLocation(String externalLocation) { + return externalLocation(Output.of(externalLocation)); + } + + public Builder foreignConnection(@Nullable Output foreignConnection) { + $.foreignConnection = foreignConnection; + return this; + } + + public Builder foreignConnection(String foreignConnection) { + return foreignConnection(Output.of(foreignConnection)); + } + + public Builder function(@Nullable Output function) { + $.function = function; + return this; + } + + public Builder function(String function) { + return function(Output.of(function)); + } + + public Builder metastore(@Nullable Output metastore) { + $.metastore = metastore; + return this; + } + + public Builder metastore(String metastore) { + return metastore(Output.of(metastore)); + } + + public Builder model(@Nullable Output model) { + $.model = model; + return this; + } + + public Builder model(String model) { + return model(Output.of(model)); + } + + public Builder pipeline(@Nullable Output pipeline) { + $.pipeline = pipeline; + return this; + } + + public Builder pipeline(String pipeline) { + return pipeline(Output.of(pipeline)); + } + + 
public Builder principal(@Nullable Output principal) { + $.principal = principal; + return this; + } + + public Builder principal(String principal) { + return principal(Output.of(principal)); + } + + public Builder privileges(@Nullable Output> privileges) { + $.privileges = privileges; + return this; + } + + public Builder privileges(List privileges) { + return privileges(Output.of(privileges)); + } + + public Builder privileges(String... privileges) { + return privileges(List.of(privileges)); + } + + public Builder recipient(@Nullable Output recipient) { + $.recipient = recipient; + return this; + } + + public Builder recipient(String recipient) { + return recipient(Output.of(recipient)); + } + + public Builder schema(@Nullable Output schema) { + $.schema = schema; + return this; + } + + public Builder schema(String schema) { + return schema(Output.of(schema)); + } + + public Builder share(@Nullable Output share) { + $.share = share; + return this; + } + + public Builder share(String share) { + return share(Output.of(share)); + } + + public Builder storageCredential(@Nullable Output storageCredential) { + $.storageCredential = storageCredential; + return this; + } + + public Builder storageCredential(String storageCredential) { + return storageCredential(Output.of(storageCredential)); + } + + public Builder table(@Nullable Output table) { + $.table = table; + return this; + } + + public Builder table(String table) { + return table(Output.of(table)); + } + + public Builder volume(@Nullable Output volume) { + $.volume = volume; + return this; + } + + public Builder volume(String volume) { + return volume(Output.of(volume)); + } + + public GrantState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessState.java index d6bfaa4c..1bbe6717 100644 --- 
a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessState.java @@ -120,6 +120,13 @@ public Optional> readOnly() { return Optional.ofNullable(this.readOnly); } + @Import(name="skipValidation") + private @Nullable Output skipValidation; + + public Optional> skipValidation() { + return Optional.ofNullable(this.skipValidation); + } + private MetastoreDataAccessState() {} private MetastoreDataAccessState(MetastoreDataAccessState $) { @@ -136,6 +143,7 @@ private MetastoreDataAccessState(MetastoreDataAccessState $) { this.name = $.name; this.owner = $.owner; this.readOnly = $.readOnly; + this.skipValidation = $.skipValidation; } public static Builder builder() { @@ -285,6 +293,15 @@ public Builder readOnly(Boolean readOnly) { return readOnly(Output.of(readOnly)); } + public Builder skipValidation(@Nullable Output skipValidation) { + $.skipValidation = skipValidation; + return this; + } + + public Builder skipValidation(Boolean skipValidation) { + return skipValidation(Output.of(skipValidation)); + } + public MetastoreDataAccessState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/RepoState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/RepoState.java index d61e406e..4ae0d5fa 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/RepoState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/RepoState.java @@ -113,6 +113,21 @@ public Optional> url() { return Optional.ofNullable(this.url); } + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + @Import(name="workspacePath") + private @Nullable Output workspacePath; + + /** + * @return path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + public Optional> workspacePath() { + return Optional.ofNullable(this.workspacePath); + } + private RepoState() {} private RepoState(RepoState $) { @@ 
-123,6 +138,7 @@ private RepoState(RepoState $) { this.sparseCheckout = $.sparseCheckout; this.tag = $.tag; this.url = $.url; + this.workspacePath = $.workspacePath; } public static Builder builder() { @@ -278,6 +294,27 @@ public Builder url(String url) { return url(Output.of(url)); } + /** + * @param workspacePath path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + * @return builder + * + */ + public Builder workspacePath(@Nullable Output workspacePath) { + $.workspacePath = workspacePath; + return this; + } + + /** + * @param workspacePath path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + * @return builder + * + */ + public Builder workspacePath(String workspacePath) { + return workspacePath(Output.of(workspacePath)); + } + public RepoState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointChannelArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointChannelArgs.java index 18ee3537..b1ec81f4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointChannelArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointChannelArgs.java @@ -15,6 +15,13 @@ public final class SqlEndpointChannelArgs extends com.pulumi.resources.ResourceA public static final SqlEndpointChannelArgs Empty = new SqlEndpointChannelArgs(); + @Import(name="dbsqlVersion") + private @Nullable Output dbsqlVersion; + + public Optional> dbsqlVersion() { + return Optional.ofNullable(this.dbsqlVersion); + } + /** * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. 
* @@ -33,6 +40,7 @@ public Optional> name() { private SqlEndpointChannelArgs() {} private SqlEndpointChannelArgs(SqlEndpointChannelArgs $) { + this.dbsqlVersion = $.dbsqlVersion; this.name = $.name; } @@ -54,6 +62,15 @@ public Builder(SqlEndpointChannelArgs defaults) { $ = new SqlEndpointChannelArgs(Objects.requireNonNull(defaults)); } + public Builder dbsqlVersion(@Nullable Output dbsqlVersion) { + $.dbsqlVersion = dbsqlVersion; + return this; + } + + public Builder dbsqlVersion(String dbsqlVersion) { + return dbsqlVersion(Output.of(dbsqlVersion)); + } + /** * @param name Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthArgs.java new file mode 100644 index 00000000..a8e20842 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthArgs.java @@ -0,0 +1,132 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.SqlEndpointHealthFailureReasonArgs; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class SqlEndpointHealthArgs extends com.pulumi.resources.ResourceArgs { + + public static final SqlEndpointHealthArgs Empty = new SqlEndpointHealthArgs(); + + @Import(name="details") + private @Nullable Output details; + + public Optional> details() { + return Optional.ofNullable(this.details); + } + + @Import(name="failureReason") + private @Nullable Output failureReason; + + public Optional> failureReason() { + return Optional.ofNullable(this.failureReason); + } + + @Import(name="message") + private @Nullable Output message; + + public Optional> message() { + return Optional.ofNullable(this.message); + } + + @Import(name="status") + private @Nullable Output status; + + public Optional> status() { + return Optional.ofNullable(this.status); + } + + @Import(name="summary") + private @Nullable Output summary; + + public Optional> summary() { + return Optional.ofNullable(this.summary); + } + + private SqlEndpointHealthArgs() {} + + private SqlEndpointHealthArgs(SqlEndpointHealthArgs $) { + this.details = $.details; + this.failureReason = $.failureReason; + this.message = $.message; + this.status = $.status; + this.summary = $.summary; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SqlEndpointHealthArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SqlEndpointHealthArgs $; + + public Builder() { + $ = new SqlEndpointHealthArgs(); + } + + public Builder(SqlEndpointHealthArgs defaults) { + $ = new SqlEndpointHealthArgs(Objects.requireNonNull(defaults)); + } + + public Builder details(@Nullable Output details) { + $.details = details; + return 
this; + } + + public Builder details(String details) { + return details(Output.of(details)); + } + + public Builder failureReason(@Nullable Output failureReason) { + $.failureReason = failureReason; + return this; + } + + public Builder failureReason(SqlEndpointHealthFailureReasonArgs failureReason) { + return failureReason(Output.of(failureReason)); + } + + public Builder message(@Nullable Output message) { + $.message = message; + return this; + } + + public Builder message(String message) { + return message(Output.of(message)); + } + + public Builder status(@Nullable Output status) { + $.status = status; + return this; + } + + public Builder status(String status) { + return status(Output.of(status)); + } + + public Builder summary(@Nullable Output summary) { + $.summary = summary; + return this; + } + + public Builder summary(String summary) { + return summary(Output.of(summary)); + } + + public SqlEndpointHealthArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthFailureReasonArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthFailureReasonArgs.java new file mode 100644 index 00000000..eba879a1 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointHealthFailureReasonArgs.java @@ -0,0 +1,99 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class SqlEndpointHealthFailureReasonArgs extends com.pulumi.resources.ResourceArgs { + + public static final SqlEndpointHealthFailureReasonArgs Empty = new SqlEndpointHealthFailureReasonArgs(); + + @Import(name="code") + private @Nullable Output code; + + public Optional> code() { + return Optional.ofNullable(this.code); + } + + @Import(name="parameters") + private @Nullable Output> parameters; + + public Optional>> parameters() { + return Optional.ofNullable(this.parameters); + } + + @Import(name="type") + private @Nullable Output type; + + public Optional> type() { + return Optional.ofNullable(this.type); + } + + private SqlEndpointHealthFailureReasonArgs() {} + + private SqlEndpointHealthFailureReasonArgs(SqlEndpointHealthFailureReasonArgs $) { + this.code = $.code; + this.parameters = $.parameters; + this.type = $.type; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SqlEndpointHealthFailureReasonArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SqlEndpointHealthFailureReasonArgs $; + + public Builder() { + $ = new SqlEndpointHealthFailureReasonArgs(); + } + + public Builder(SqlEndpointHealthFailureReasonArgs defaults) { + $ = new SqlEndpointHealthFailureReasonArgs(Objects.requireNonNull(defaults)); + } + + public Builder code(@Nullable Output code) { + $.code = code; + return this; + } + + public Builder code(String code) { + return code(Output.of(code)); + } + + public Builder parameters(@Nullable Output> parameters) { + $.parameters = parameters; + return this; + } + + public Builder parameters(Map parameters) { + return 
parameters(Output.of(parameters)); + } + + public Builder type(@Nullable Output type) { + $.type = type; + return this; + } + + public Builder type(String type) { + return type(Output.of(type)); + } + + public SqlEndpointHealthFailureReasonArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointOdbcParamsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointOdbcParamsArgs.java index 49e07ae7..b420b977 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointOdbcParamsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointOdbcParamsArgs.java @@ -5,7 +5,6 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.Integer; import java.lang.String; import java.util.Objects; @@ -17,13 +16,6 @@ public final class SqlEndpointOdbcParamsArgs extends com.pulumi.resources.Resour public static final SqlEndpointOdbcParamsArgs Empty = new SqlEndpointOdbcParamsArgs(); - @Import(name="host") - private @Nullable Output host; - - public Optional> host() { - return Optional.ofNullable(this.host); - } - @Import(name="hostname") private @Nullable Output hostname; @@ -31,31 +23,30 @@ public Optional> hostname() { return Optional.ofNullable(this.hostname); } - @Import(name="path", required=true) - private Output path; + @Import(name="path") + private @Nullable Output path; - public Output path() { - return this.path; + public Optional> path() { + return Optional.ofNullable(this.path); } - @Import(name="port", required=true) - private Output port; + @Import(name="port") + private @Nullable Output port; - public Output port() { - return this.port; + public Optional> port() { + return Optional.ofNullable(this.port); } - @Import(name="protocol", required=true) - private Output protocol; + @Import(name="protocol") + private @Nullable Output protocol; - public Output 
protocol() { - return this.protocol; + public Optional> protocol() { + return Optional.ofNullable(this.protocol); } private SqlEndpointOdbcParamsArgs() {} private SqlEndpointOdbcParamsArgs(SqlEndpointOdbcParamsArgs $) { - this.host = $.host; this.hostname = $.hostname; this.path = $.path; this.port = $.port; @@ -80,15 +71,6 @@ public Builder(SqlEndpointOdbcParamsArgs defaults) { $ = new SqlEndpointOdbcParamsArgs(Objects.requireNonNull(defaults)); } - public Builder host(@Nullable Output host) { - $.host = host; - return this; - } - - public Builder host(String host) { - return host(Output.of(host)); - } - public Builder hostname(@Nullable Output hostname) { $.hostname = hostname; return this; @@ -98,7 +80,7 @@ public Builder hostname(String hostname) { return hostname(Output.of(hostname)); } - public Builder path(Output path) { + public Builder path(@Nullable Output path) { $.path = path; return this; } @@ -107,7 +89,7 @@ public Builder path(String path) { return path(Output.of(path)); } - public Builder port(Output port) { + public Builder port(@Nullable Output port) { $.port = port; return this; } @@ -116,7 +98,7 @@ public Builder port(Integer port) { return port(Output.of(port)); } - public Builder protocol(Output protocol) { + public Builder protocol(@Nullable Output protocol) { $.protocol = protocol; return this; } @@ -126,15 +108,6 @@ public Builder protocol(String protocol) { } public SqlEndpointOdbcParamsArgs build() { - if ($.path == null) { - throw new MissingRequiredPropertyException("SqlEndpointOdbcParamsArgs", "path"); - } - if ($.port == null) { - throw new MissingRequiredPropertyException("SqlEndpointOdbcParamsArgs", "port"); - } - if ($.protocol == null) { - throw new MissingRequiredPropertyException("SqlEndpointOdbcParamsArgs", "protocol"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointState.java index cef14e67..3d92b62c 
100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointState.java @@ -6,11 +6,13 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.SqlEndpointChannelArgs; +import com.pulumi.databricks.inputs.SqlEndpointHealthArgs; import com.pulumi.databricks.inputs.SqlEndpointOdbcParamsArgs; import com.pulumi.databricks.inputs.SqlEndpointTagsArgs; import java.lang.Boolean; import java.lang.Integer; import java.lang.String; +import java.util.List; import java.util.Objects; import java.util.Optional; import javax.annotation.Nullable; @@ -65,6 +67,21 @@ public Optional> clusterSize() { return Optional.ofNullable(this.clusterSize); } + /** + * The username of the user who created the endpoint. + * + */ + @Import(name="creatorName") + private @Nullable Output creatorName; + + /** + * @return The username of the user who created the endpoint. + * + */ + public Optional> creatorName() { + return Optional.ofNullable(this.creatorName); + } + /** * ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. * @@ -118,6 +135,21 @@ public Optional> enableServerlessCompute() { return Optional.ofNullable(this.enableServerlessCompute); } + /** + * Health status of the endpoint. + * + */ + @Import(name="healths") + private @Nullable Output> healths; + + /** + * @return Health status of the endpoint. + * + */ + public Optional>> healths() { + return Optional.ofNullable(this.healths); + } + @Import(name="instanceProfileArn") private @Nullable Output instanceProfileArn; @@ -185,9 +217,32 @@ public Optional> name() { return Optional.ofNullable(this.name); } + /** + * The current number of clusters used by the endpoint. + * + */ + @Import(name="numActiveSessions") + private @Nullable Output numActiveSessions; + + /** + * @return The current number of clusters used by the endpoint. 
+ * + */ + public Optional> numActiveSessions() { + return Optional.ofNullable(this.numActiveSessions); + } + + /** + * The current number of clusters used by the endpoint. + * + */ @Import(name="numClusters") private @Nullable Output numClusters; + /** + * @return The current number of clusters used by the endpoint. + * + */ public Optional> numClusters() { return Optional.ofNullable(this.numClusters); } @@ -222,9 +277,17 @@ public Optional> spotInstancePolicy() { return Optional.ofNullable(this.spotInstancePolicy); } + /** + * The current state of the endpoint. + * + */ @Import(name="state") private @Nullable Output state; + /** + * @return The current state of the endpoint. + * + */ public Optional> state() { return Optional.ofNullable(this.state); } @@ -245,14 +308,14 @@ public Optional> tags() { } /** - * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. 
Otherwise, the default is `CLASSIC`. * */ @Import(name="warehouseType") private @Nullable Output warehouseType; /** - * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
* */ public Optional> warehouseType() { @@ -265,14 +328,17 @@ private SqlEndpointState(SqlEndpointState $) { this.autoStopMins = $.autoStopMins; this.channel = $.channel; this.clusterSize = $.clusterSize; + this.creatorName = $.creatorName; this.dataSourceId = $.dataSourceId; this.enablePhoton = $.enablePhoton; this.enableServerlessCompute = $.enableServerlessCompute; + this.healths = $.healths; this.instanceProfileArn = $.instanceProfileArn; this.jdbcUrl = $.jdbcUrl; this.maxNumClusters = $.maxNumClusters; this.minNumClusters = $.minNumClusters; this.name = $.name; + this.numActiveSessions = $.numActiveSessions; this.numClusters = $.numClusters; this.odbcParams = $.odbcParams; this.spotInstancePolicy = $.spotInstancePolicy; @@ -362,6 +428,27 @@ public Builder clusterSize(String clusterSize) { return clusterSize(Output.of(clusterSize)); } + /** + * @param creatorName The username of the user who created the endpoint. + * + * @return builder + * + */ + public Builder creatorName(@Nullable Output creatorName) { + $.creatorName = creatorName; + return this; + } + + /** + * @param creatorName The username of the user who created the endpoint. + * + * @return builder + * + */ + public Builder creatorName(String creatorName) { + return creatorName(Output.of(creatorName)); + } + /** * @param dataSourceId ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. * @@ -433,6 +520,37 @@ public Builder enableServerlessCompute(Boolean enableServerlessCompute) { return enableServerlessCompute(Output.of(enableServerlessCompute)); } + /** + * @param healths Health status of the endpoint. + * + * @return builder + * + */ + public Builder healths(@Nullable Output> healths) { + $.healths = healths; + return this; + } + + /** + * @param healths Health status of the endpoint. 
+ * + * @return builder + * + */ + public Builder healths(List healths) { + return healths(Output.of(healths)); + } + + /** + * @param healths Health status of the endpoint. + * + * @return builder + * + */ + public Builder healths(SqlEndpointHealthArgs... healths) { + return healths(List.of(healths)); + } + public Builder instanceProfileArn(@Nullable Output instanceProfileArn) { $.instanceProfileArn = instanceProfileArn; return this; @@ -526,11 +644,44 @@ public Builder name(String name) { return name(Output.of(name)); } + /** + * @param numActiveSessions The current number of clusters used by the endpoint. + * + * @return builder + * + */ + public Builder numActiveSessions(@Nullable Output numActiveSessions) { + $.numActiveSessions = numActiveSessions; + return this; + } + + /** + * @param numActiveSessions The current number of clusters used by the endpoint. + * + * @return builder + * + */ + public Builder numActiveSessions(Integer numActiveSessions) { + return numActiveSessions(Output.of(numActiveSessions)); + } + + /** + * @param numClusters The current number of clusters used by the endpoint. + * + * @return builder + * + */ public Builder numClusters(@Nullable Output numClusters) { $.numClusters = numClusters; return this; } + /** + * @param numClusters The current number of clusters used by the endpoint. + * + * @return builder + * + */ public Builder numClusters(Integer numClusters) { return numClusters(Output.of(numClusters)); } @@ -577,11 +728,23 @@ public Builder spotInstancePolicy(String spotInstancePolicy) { return spotInstancePolicy(Output.of(spotInstancePolicy)); } + /** + * @param state The current state of the endpoint. + * + * @return builder + * + */ public Builder state(@Nullable Output state) { $.state = state; return this; } + /** + * @param state The current state of the endpoint. 
+ * + * @return builder + * + */ public Builder state(String state) { return state(Output.of(state)); } @@ -608,7 +771,7 @@ public Builder tags(SqlEndpointTagsArgs tags) { } /** - * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. * * @return builder * @@ -619,7 +782,7 @@ public Builder warehouseType(@Nullable Output warehouseType) { } /** - * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * @param warehouseType SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointTagsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointTagsArgs.java index 3c4de671..3c1f2297 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointTagsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlEndpointTagsArgs.java @@ -6,20 +6,21 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.databricks.inputs.SqlEndpointTagsCustomTagArgs; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.util.List; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class SqlEndpointTagsArgs extends com.pulumi.resources.ResourceArgs { public static final SqlEndpointTagsArgs Empty = new SqlEndpointTagsArgs(); - @Import(name="customTags", required=true) - private Output> customTags; + @Import(name="customTags") + private @Nullable Output> customTags; - public Output> customTags() { - return this.customTags; + public Optional>> customTags() { + return 
Optional.ofNullable(this.customTags); } private SqlEndpointTagsArgs() {} @@ -46,7 +47,7 @@ public Builder(SqlEndpointTagsArgs defaults) { $ = new SqlEndpointTagsArgs(Objects.requireNonNull(defaults)); } - public Builder customTags(Output> customTags) { + public Builder customTags(@Nullable Output> customTags) { $.customTags = customTags; return this; } @@ -60,9 +61,6 @@ public Builder customTags(SqlEndpointTagsCustomTagArgs... customTags) { } public SqlEndpointTagsArgs build() { - if ($.customTags == null) { - throw new MissingRequiredPropertyException("SqlEndpointTagsArgs", "customTags"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java index 929035f0..d9547c01 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java @@ -149,6 +149,21 @@ public Optional> readOnly() { return Optional.ofNullable(this.readOnly); } + /** + * Suppress validation errors if any & force save the storage credential. + * + */ + @Import(name="skipValidation") + private @Nullable Output skipValidation; + + /** + * @return Suppress validation errors if any & force save the storage credential. + * + */ + public Optional> skipValidation() { + return Optional.ofNullable(this.skipValidation); + } + private StorageCredentialState() {} private StorageCredentialState(StorageCredentialState $) { @@ -164,6 +179,7 @@ private StorageCredentialState(StorageCredentialState $) { this.name = $.name; this.owner = $.owner; this.readOnly = $.readOnly; + this.skipValidation = $.skipValidation; } public static Builder builder() { @@ -356,6 +372,27 @@ public Builder readOnly(Boolean readOnly) { return readOnly(Output.of(readOnly)); } + /** + * @param skipValidation Suppress validation errors if any & force save the storage credential. 
+ * + * @return builder + * + */ + public Builder skipValidation(@Nullable Output skipValidation) { + $.skipValidation = skipValidation; + return this; + } + + /** + * @param skipValidation Suppress validation errors if any & force save the storage credential. + * + * @return builder + * + */ + public Builder skipValidation(Boolean skipValidation) { + return skipValidation(Output.of(skipValidation)); + } + public StorageCredentialState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreMetastoreInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreMetastoreInfo.java new file mode 100644 index 00000000..38f87150 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreMetastoreInfo.java @@ -0,0 +1,407 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetCurrentMetastoreMetastoreInfo { + private @Nullable String cloud; + /** + * @return Timestamp (in milliseconds) when the current metastore was created. + * + */ + private @Nullable Integer createdAt; + /** + * @return the ID of the identity that created the current metastore. + * + */ + private @Nullable String createdBy; + /** + * @return the ID of the default data access configuration. + * + */ + private @Nullable String defaultDataAccessConfigId; + /** + * @return The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + */ + private @Nullable String deltaSharingOrganizationName; + /** + * @return the expiration duration in seconds on recipient data access tokens. 
+ * + */ + private @Nullable Integer deltaSharingRecipientTokenLifetimeInSeconds; + /** + * @return Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + */ + private @Nullable String deltaSharingScope; + /** + * @return Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + */ + private @Nullable String globalMetastoreId; + /** + * @return Metastore ID. + * + */ + private @Nullable String metastoreId; + /** + * @return Name of metastore. + * + */ + private @Nullable String name; + /** + * @return Username/group name/sp application_id of the metastore owner. + * + */ + private @Nullable String owner; + /** + * @return the version of the privilege model used by the metastore. + * + */ + private @Nullable String privilegeModelVersion; + /** + * @return (Mandatory for account-level) The region of the metastore. + * + */ + private @Nullable String region; + /** + * @return Path on cloud storage account, where managed `databricks.Table` are stored. + * + */ + private @Nullable String storageRoot; + /** + * @return ID of a storage credential used for the `storage_root`. + * + */ + private @Nullable String storageRootCredentialId; + /** + * @return Name of a storage credential used for the `storage_root`. + * + */ + private @Nullable String storageRootCredentialName; + /** + * @return Timestamp (in milliseconds) when the current metastore was updated. + * + */ + private @Nullable Integer updatedAt; + /** + * @return the ID of the identity that updated the current metastore. + * + */ + private @Nullable String updatedBy; + + private GetCurrentMetastoreMetastoreInfo() {} + public Optional cloud() { + return Optional.ofNullable(this.cloud); + } + /** + * @return Timestamp (in milliseconds) when the current metastore was created. 
+ * + */ + public Optional createdAt() { + return Optional.ofNullable(this.createdAt); + } + /** + * @return the ID of the identity that created the current metastore. + * + */ + public Optional createdBy() { + return Optional.ofNullable(this.createdBy); + } + /** + * @return the ID of the default data access configuration. + * + */ + public Optional defaultDataAccessConfigId() { + return Optional.ofNullable(this.defaultDataAccessConfigId); + } + /** + * @return The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + * + */ + public Optional deltaSharingOrganizationName() { + return Optional.ofNullable(this.deltaSharingOrganizationName); + } + /** + * @return the expiration duration in seconds on recipient data access tokens. + * + */ + public Optional deltaSharingRecipientTokenLifetimeInSeconds() { + return Optional.ofNullable(this.deltaSharingRecipientTokenLifetimeInSeconds); + } + /** + * @return Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + * + */ + public Optional deltaSharingScope() { + return Optional.ofNullable(this.deltaSharingScope); + } + /** + * @return Identifier in form of `<cloud>:<region>:<metastore_id>` for use in Databricks to Databricks Delta Sharing. + * + */ + public Optional globalMetastoreId() { + return Optional.ofNullable(this.globalMetastoreId); + } + /** + * @return Metastore ID. + * + */ + public Optional metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + /** + * @return Name of metastore. + * + */ + public Optional name() { + return Optional.ofNullable(this.name); + } + /** + * @return Username/group name/sp application_id of the metastore owner. + * + */ + public Optional owner() { + return Optional.ofNullable(this.owner); + } + /** + * @return the version of the privilege model used by the metastore. 
+ * + */ + public Optional privilegeModelVersion() { + return Optional.ofNullable(this.privilegeModelVersion); + } + /** + * @return (Mandatory for account-level) The region of the metastore. + * + */ + public Optional region() { + return Optional.ofNullable(this.region); + } + /** + * @return Path on cloud storage account, where managed `databricks.Table` are stored. + * + */ + public Optional storageRoot() { + return Optional.ofNullable(this.storageRoot); + } + /** + * @return ID of a storage credential used for the `storage_root`. + * + */ + public Optional storageRootCredentialId() { + return Optional.ofNullable(this.storageRootCredentialId); + } + /** + * @return Name of a storage credential used for the `storage_root`. + * + */ + public Optional storageRootCredentialName() { + return Optional.ofNullable(this.storageRootCredentialName); + } + /** + * @return Timestamp (in milliseconds) when the current metastore was updated. + * + */ + public Optional updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + /** + * @return the ID of the identity that updated the current metastore. 
+ * + */ + public Optional updatedBy() { + return Optional.ofNullable(this.updatedBy); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCurrentMetastoreMetastoreInfo defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String cloud; + private @Nullable Integer createdAt; + private @Nullable String createdBy; + private @Nullable String defaultDataAccessConfigId; + private @Nullable String deltaSharingOrganizationName; + private @Nullable Integer deltaSharingRecipientTokenLifetimeInSeconds; + private @Nullable String deltaSharingScope; + private @Nullable String globalMetastoreId; + private @Nullable String metastoreId; + private @Nullable String name; + private @Nullable String owner; + private @Nullable String privilegeModelVersion; + private @Nullable String region; + private @Nullable String storageRoot; + private @Nullable String storageRootCredentialId; + private @Nullable String storageRootCredentialName; + private @Nullable Integer updatedAt; + private @Nullable String updatedBy; + public Builder() {} + public Builder(GetCurrentMetastoreMetastoreInfo defaults) { + Objects.requireNonNull(defaults); + this.cloud = defaults.cloud; + this.createdAt = defaults.createdAt; + this.createdBy = defaults.createdBy; + this.defaultDataAccessConfigId = defaults.defaultDataAccessConfigId; + this.deltaSharingOrganizationName = defaults.deltaSharingOrganizationName; + this.deltaSharingRecipientTokenLifetimeInSeconds = defaults.deltaSharingRecipientTokenLifetimeInSeconds; + this.deltaSharingScope = defaults.deltaSharingScope; + this.globalMetastoreId = defaults.globalMetastoreId; + this.metastoreId = defaults.metastoreId; + this.name = defaults.name; + this.owner = defaults.owner; + this.privilegeModelVersion = defaults.privilegeModelVersion; + this.region = defaults.region; + this.storageRoot = defaults.storageRoot; + 
this.storageRootCredentialId = defaults.storageRootCredentialId; + this.storageRootCredentialName = defaults.storageRootCredentialName; + this.updatedAt = defaults.updatedAt; + this.updatedBy = defaults.updatedBy; + } + + @CustomType.Setter + public Builder cloud(@Nullable String cloud) { + + this.cloud = cloud; + return this; + } + @CustomType.Setter + public Builder createdAt(@Nullable Integer createdAt) { + + this.createdAt = createdAt; + return this; + } + @CustomType.Setter + public Builder createdBy(@Nullable String createdBy) { + + this.createdBy = createdBy; + return this; + } + @CustomType.Setter + public Builder defaultDataAccessConfigId(@Nullable String defaultDataAccessConfigId) { + + this.defaultDataAccessConfigId = defaultDataAccessConfigId; + return this; + } + @CustomType.Setter + public Builder deltaSharingOrganizationName(@Nullable String deltaSharingOrganizationName) { + + this.deltaSharingOrganizationName = deltaSharingOrganizationName; + return this; + } + @CustomType.Setter + public Builder deltaSharingRecipientTokenLifetimeInSeconds(@Nullable Integer deltaSharingRecipientTokenLifetimeInSeconds) { + + this.deltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds; + return this; + } + @CustomType.Setter + public Builder deltaSharingScope(@Nullable String deltaSharingScope) { + + this.deltaSharingScope = deltaSharingScope; + return this; + } + @CustomType.Setter + public Builder globalMetastoreId(@Nullable String globalMetastoreId) { + + this.globalMetastoreId = globalMetastoreId; + return this; + } + @CustomType.Setter + public Builder metastoreId(@Nullable String metastoreId) { + + this.metastoreId = metastoreId; + return this; + } + @CustomType.Setter + public Builder name(@Nullable String name) { + + this.name = name; + return this; + } + @CustomType.Setter + public Builder owner(@Nullable String owner) { + + this.owner = owner; + return this; + } + @CustomType.Setter + public Builder 
privilegeModelVersion(@Nullable String privilegeModelVersion) { + + this.privilegeModelVersion = privilegeModelVersion; + return this; + } + @CustomType.Setter + public Builder region(@Nullable String region) { + + this.region = region; + return this; + } + @CustomType.Setter + public Builder storageRoot(@Nullable String storageRoot) { + + this.storageRoot = storageRoot; + return this; + } + @CustomType.Setter + public Builder storageRootCredentialId(@Nullable String storageRootCredentialId) { + + this.storageRootCredentialId = storageRootCredentialId; + return this; + } + @CustomType.Setter + public Builder storageRootCredentialName(@Nullable String storageRootCredentialName) { + + this.storageRootCredentialName = storageRootCredentialName; + return this; + } + @CustomType.Setter + public Builder updatedAt(@Nullable Integer updatedAt) { + + this.updatedAt = updatedAt; + return this; + } + @CustomType.Setter + public Builder updatedBy(@Nullable String updatedBy) { + + this.updatedBy = updatedBy; + return this; + } + public GetCurrentMetastoreMetastoreInfo build() { + final var _resultValue = new GetCurrentMetastoreMetastoreInfo(); + _resultValue.cloud = cloud; + _resultValue.createdAt = createdAt; + _resultValue.createdBy = createdBy; + _resultValue.defaultDataAccessConfigId = defaultDataAccessConfigId; + _resultValue.deltaSharingOrganizationName = deltaSharingOrganizationName; + _resultValue.deltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds; + _resultValue.deltaSharingScope = deltaSharingScope; + _resultValue.globalMetastoreId = globalMetastoreId; + _resultValue.metastoreId = metastoreId; + _resultValue.name = name; + _resultValue.owner = owner; + _resultValue.privilegeModelVersion = privilegeModelVersion; + _resultValue.region = region; + _resultValue.storageRoot = storageRoot; + _resultValue.storageRootCredentialId = storageRootCredentialId; + _resultValue.storageRootCredentialName = storageRootCredentialName; + 
_resultValue.updatedAt = updatedAt; + _resultValue.updatedBy = updatedBy; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreResult.java new file mode 100644 index 00000000..84585596 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentMetastoreResult.java @@ -0,0 +1,82 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.databricks.outputs.GetCurrentMetastoreMetastoreInfo; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetCurrentMetastoreResult { + /** + * @return metastore ID. + * + */ + private String id; + /** + * @return summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + * + */ + private GetCurrentMetastoreMetastoreInfo metastoreInfo; + + private GetCurrentMetastoreResult() {} + /** + * @return metastore ID. + * + */ + public String id() { + return this.id; + } + /** + * @return summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). 
This contains the following attributes (check the API page for up-to-date details): + * + */ + public GetCurrentMetastoreMetastoreInfo metastoreInfo() { + return this.metastoreInfo; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCurrentMetastoreResult defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + private GetCurrentMetastoreMetastoreInfo metastoreInfo; + public Builder() {} + public Builder(GetCurrentMetastoreResult defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + this.metastoreInfo = defaults.metastoreInfo; + } + + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetCurrentMetastoreResult", "id"); + } + this.id = id; + return this; + } + @CustomType.Setter + public Builder metastoreInfo(GetCurrentMetastoreMetastoreInfo metastoreInfo) { + if (metastoreInfo == null) { + throw new MissingRequiredPropertyException("GetCurrentMetastoreResult", "metastoreInfo"); + } + this.metastoreInfo = metastoreInfo; + return this; + } + public GetCurrentMetastoreResult build() { + final var _resultValue = new GetCurrentMetastoreResult(); + _resultValue.id = id; + _resultValue.metastoreInfo = metastoreInfo; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetDirectoryResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetDirectoryResult.java index 7d2ff5d8..deb577e5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetDirectoryResult.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetDirectoryResult.java @@ -22,6 +22,11 @@ public final class GetDirectoryResult { */ private Integer objectId; private String path; + /** + * @return path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + private String workspacePath; private 
GetDirectoryResult() {} /** @@ -41,6 +46,13 @@ public Integer objectId() { public String path() { return this.path; } + /** + * @return path on Workspace File System (WSFS) in form of `/Workspace` + `path` + * + */ + public String workspacePath() { + return this.workspacePath; + } public static Builder builder() { return new Builder(); @@ -54,12 +66,14 @@ public static final class Builder { private String id; private Integer objectId; private String path; + private String workspacePath; public Builder() {} public Builder(GetDirectoryResult defaults) { Objects.requireNonNull(defaults); this.id = defaults.id; this.objectId = defaults.objectId; this.path = defaults.path; + this.workspacePath = defaults.workspacePath; } @CustomType.Setter @@ -86,11 +100,20 @@ public Builder path(String path) { this.path = path; return this; } + @CustomType.Setter + public Builder workspacePath(String workspacePath) { + if (workspacePath == null) { + throw new MissingRequiredPropertyException("GetDirectoryResult", "workspacePath"); + } + this.workspacePath = workspacePath; + return this; + } public GetDirectoryResult build() { final var _resultValue = new GetDirectoryResult(); _resultValue.id = id; _resultValue.objectId = objectId; _resultValue.path = path; + _resultValue.workspacePath = workspacePath; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetMetastoreMetastoreInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetMetastoreMetastoreInfo.java index 175ce0ec..b54a026e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetMetastoreMetastoreInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetMetastoreMetastoreInfo.java @@ -50,7 +50,7 @@ public final class GetMetastoreMetastoreInfo { private @Nullable String privilegeModelVersion; private @Nullable String region; /** - * @return Path on cloud storage account, where managed `databricks.Table` are stored. 
Change forces creation of a new resource. + * @return Path on cloud storage account, where managed `databricks.Table` are stored. * */ private @Nullable String storageRoot; @@ -124,7 +124,7 @@ public Optional region() { return Optional.ofNullable(this.region); } /** - * @return Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * @return Path on cloud storage account, where managed `databricks.Table` are stored. * */ public Optional storageRoot() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseChannel.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseChannel.java index caed5583..b25a8ec3 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseChannel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseChannel.java @@ -11,6 +11,7 @@ @CustomType public final class GetSqlWarehouseChannel { + private @Nullable String dbsqlVersion; /** * @return Name of the SQL warehouse to search (case-sensitive). * @@ -18,6 +19,9 @@ public final class GetSqlWarehouseChannel { private @Nullable String name; private GetSqlWarehouseChannel() {} + public Optional dbsqlVersion() { + return Optional.ofNullable(this.dbsqlVersion); + } /** * @return Name of the SQL warehouse to search (case-sensitive). 
* @@ -35,13 +39,21 @@ public static Builder builder(GetSqlWarehouseChannel defaults) { } @CustomType.Builder public static final class Builder { + private @Nullable String dbsqlVersion; private @Nullable String name; public Builder() {} public Builder(GetSqlWarehouseChannel defaults) { Objects.requireNonNull(defaults); + this.dbsqlVersion = defaults.dbsqlVersion; this.name = defaults.name; } + @CustomType.Setter + public Builder dbsqlVersion(@Nullable String dbsqlVersion) { + + this.dbsqlVersion = dbsqlVersion; + return this; + } @CustomType.Setter public Builder name(@Nullable String name) { @@ -50,6 +62,7 @@ public Builder name(@Nullable String name) { } public GetSqlWarehouseChannel build() { final var _resultValue = new GetSqlWarehouseChannel(); + _resultValue.dbsqlVersion = dbsqlVersion; _resultValue.name = name; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealth.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealth.java new file mode 100644 index 00000000..d8a48c9b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealth.java @@ -0,0 +1,102 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.databricks.outputs.GetSqlWarehouseHealthFailureReason; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetSqlWarehouseHealth { + private @Nullable String details; + private @Nullable GetSqlWarehouseHealthFailureReason failureReason; + private @Nullable String message; + private @Nullable String status; + private @Nullable String summary; + + private GetSqlWarehouseHealth() {} + public Optional details() { + return Optional.ofNullable(this.details); + } + public Optional failureReason() { + return Optional.ofNullable(this.failureReason); + } + public Optional message() { + return Optional.ofNullable(this.message); + } + public Optional status() { + return Optional.ofNullable(this.status); + } + public Optional summary() { + return Optional.ofNullable(this.summary); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetSqlWarehouseHealth defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String details; + private @Nullable GetSqlWarehouseHealthFailureReason failureReason; + private @Nullable String message; + private @Nullable String status; + private @Nullable String summary; + public Builder() {} + public Builder(GetSqlWarehouseHealth defaults) { + Objects.requireNonNull(defaults); + this.details = defaults.details; + this.failureReason = defaults.failureReason; + this.message = defaults.message; + this.status = defaults.status; + this.summary = defaults.summary; + } + + @CustomType.Setter + public Builder details(@Nullable String details) { + + this.details = details; + return this; + } + @CustomType.Setter + public Builder failureReason(@Nullable GetSqlWarehouseHealthFailureReason failureReason) { + + this.failureReason = 
failureReason; + return this; + } + @CustomType.Setter + public Builder message(@Nullable String message) { + + this.message = message; + return this; + } + @CustomType.Setter + public Builder status(@Nullable String status) { + + this.status = status; + return this; + } + @CustomType.Setter + public Builder summary(@Nullable String summary) { + + this.summary = summary; + return this; + } + public GetSqlWarehouseHealth build() { + final var _resultValue = new GetSqlWarehouseHealth(); + _resultValue.details = details; + _resultValue.failureReason = failureReason; + _resultValue.message = message; + _resultValue.status = status; + _resultValue.summary = summary; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealthFailureReason.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealthFailureReason.java new file mode 100644 index 00000000..846d7024 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseHealthFailureReason.java @@ -0,0 +1,77 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetSqlWarehouseHealthFailureReason { + private @Nullable String code; + private @Nullable Map parameters; + private @Nullable String type; + + private GetSqlWarehouseHealthFailureReason() {} + public Optional code() { + return Optional.ofNullable(this.code); + } + public Map parameters() { + return this.parameters == null ? 
Map.of() : this.parameters; + } + public Optional type() { + return Optional.ofNullable(this.type); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetSqlWarehouseHealthFailureReason defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String code; + private @Nullable Map parameters; + private @Nullable String type; + public Builder() {} + public Builder(GetSqlWarehouseHealthFailureReason defaults) { + Objects.requireNonNull(defaults); + this.code = defaults.code; + this.parameters = defaults.parameters; + this.type = defaults.type; + } + + @CustomType.Setter + public Builder code(@Nullable String code) { + + this.code = code; + return this; + } + @CustomType.Setter + public Builder parameters(@Nullable Map parameters) { + + this.parameters = parameters; + return this; + } + @CustomType.Setter + public Builder type(@Nullable String type) { + + this.type = type; + return this; + } + public GetSqlWarehouseHealthFailureReason build() { + final var _resultValue = new GetSqlWarehouseHealthFailureReason(); + _resultValue.code = code; + _resultValue.parameters = parameters; + _resultValue.type = type; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseOdbcParams.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseOdbcParams.java index 50229a41..93a708b5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseOdbcParams.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseOdbcParams.java @@ -4,7 +4,6 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.Integer; import java.lang.String; import java.util.Objects; @@ -13,27 +12,23 @@ @CustomType public final class GetSqlWarehouseOdbcParams { - 
private @Nullable String host; private @Nullable String hostname; - private String path; - private Integer port; - private String protocol; + private @Nullable String path; + private @Nullable Integer port; + private @Nullable String protocol; private GetSqlWarehouseOdbcParams() {} - public Optional host() { - return Optional.ofNullable(this.host); - } public Optional hostname() { return Optional.ofNullable(this.hostname); } - public String path() { - return this.path; + public Optional path() { + return Optional.ofNullable(this.path); } - public Integer port() { - return this.port; + public Optional port() { + return Optional.ofNullable(this.port); } - public String protocol() { - return this.protocol; + public Optional protocol() { + return Optional.ofNullable(this.protocol); } public static Builder builder() { @@ -45,27 +40,19 @@ public static Builder builder(GetSqlWarehouseOdbcParams defaults) { } @CustomType.Builder public static final class Builder { - private @Nullable String host; private @Nullable String hostname; - private String path; - private Integer port; - private String protocol; + private @Nullable String path; + private @Nullable Integer port; + private @Nullable String protocol; public Builder() {} public Builder(GetSqlWarehouseOdbcParams defaults) { Objects.requireNonNull(defaults); - this.host = defaults.host; this.hostname = defaults.hostname; this.path = defaults.path; this.port = defaults.port; this.protocol = defaults.protocol; } - @CustomType.Setter - public Builder host(@Nullable String host) { - - this.host = host; - return this; - } @CustomType.Setter public Builder hostname(@Nullable String hostname) { @@ -73,32 +60,25 @@ public Builder hostname(@Nullable String hostname) { return this; } @CustomType.Setter - public Builder path(String path) { - if (path == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParams", "path"); - } + public Builder path(@Nullable String path) { + this.path = path; return this; } 
@CustomType.Setter - public Builder port(Integer port) { - if (port == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParams", "port"); - } + public Builder port(@Nullable Integer port) { + this.port = port; return this; } @CustomType.Setter - public Builder protocol(String protocol) { - if (protocol == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseOdbcParams", "protocol"); - } + public Builder protocol(@Nullable String protocol) { + this.protocol = protocol; return this; } public GetSqlWarehouseOdbcParams build() { final var _resultValue = new GetSqlWarehouseOdbcParams(); - _resultValue.host = host; _resultValue.hostname = hostname; _resultValue.path = path; _resultValue.port = port; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseResult.java index 508df235..0d6356e7 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseResult.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseResult.java @@ -5,6 +5,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.databricks.outputs.GetSqlWarehouseChannel; +import com.pulumi.databricks.outputs.GetSqlWarehouseHealth; import com.pulumi.databricks.outputs.GetSqlWarehouseOdbcParams; import com.pulumi.databricks.outputs.GetSqlWarehouseTags; import com.pulumi.exceptions.MissingRequiredPropertyException; @@ -30,6 +31,11 @@ public final class GetSqlWarehouseResult { * */ private String clusterSize; + /** + * @return The username of the user who created the endpoint. + * + */ + private String creatorName; /** * @return ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. * @@ -45,6 +51,11 @@ public final class GetSqlWarehouseResult { * */ private Boolean enableServerlessCompute; + /** + * @return Health status of the endpoint. 
+ * + */ + private GetSqlWarehouseHealth health; /** * @return The ID of the SQL warehouse. * @@ -71,6 +82,15 @@ public final class GetSqlWarehouseResult { * */ private String name; + /** + * @return The current number of clusters used by the endpoint. + * + */ + private Integer numActiveSessions; + /** + * @return The current number of clusters used by the endpoint. + * + */ private Integer numClusters; /** * @return ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -82,12 +102,21 @@ public final class GetSqlWarehouseResult { * */ private String spotInstancePolicy; + /** + * @return The current state of the endpoint. + * + */ private String state; /** * @return tags used for SQL warehouse resources. * */ private GetSqlWarehouseTags tags; + /** + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + */ + private String warehouseType; private GetSqlWarehouseResult() {} /** @@ -111,6 +140,13 @@ public GetSqlWarehouseChannel channel() { public String clusterSize() { return this.clusterSize; } + /** + * @return The username of the user who created the endpoint. + * + */ + public String creatorName() { + return this.creatorName; + } /** * @return ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. * @@ -132,6 +168,13 @@ public Boolean enablePhoton() { public Boolean enableServerlessCompute() { return this.enableServerlessCompute; } + /** + * @return Health status of the endpoint. + * + */ + public GetSqlWarehouseHealth health() { + return this.health; + } /** * @return The ID of the SQL warehouse. * @@ -170,6 +213,17 @@ public Integer minNumClusters() { public String name() { return this.name; } + /** + * @return The current number of clusters used by the endpoint. 
+ * + */ + public Integer numActiveSessions() { + return this.numActiveSessions; + } + /** + * @return The current number of clusters used by the endpoint. + * + */ public Integer numClusters() { return this.numClusters; } @@ -187,6 +241,10 @@ public GetSqlWarehouseOdbcParams odbcParams() { public String spotInstancePolicy() { return this.spotInstancePolicy; } + /** + * @return The current state of the endpoint. + * + */ public String state() { return this.state; } @@ -197,6 +255,13 @@ public String state() { public GetSqlWarehouseTags tags() { return this.tags; } + /** + * @return SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + * + */ + public String warehouseType() { + return this.warehouseType; + } public static Builder builder() { return new Builder(); @@ -210,40 +275,48 @@ public static final class Builder { private Integer autoStopMins; private GetSqlWarehouseChannel channel; private String clusterSize; + private String creatorName; private String dataSourceId; private Boolean enablePhoton; private Boolean enableServerlessCompute; + private GetSqlWarehouseHealth health; private String id; private String instanceProfileArn; private String jdbcUrl; private Integer maxNumClusters; private Integer minNumClusters; private String name; + private Integer numActiveSessions; private Integer numClusters; private GetSqlWarehouseOdbcParams odbcParams; private String spotInstancePolicy; private String state; private GetSqlWarehouseTags tags; + private String warehouseType; public Builder() {} public Builder(GetSqlWarehouseResult defaults) { Objects.requireNonNull(defaults); this.autoStopMins = defaults.autoStopMins; this.channel = defaults.channel; this.clusterSize = defaults.clusterSize; + this.creatorName = defaults.creatorName; this.dataSourceId = defaults.dataSourceId; this.enablePhoton = defaults.enablePhoton; this.enableServerlessCompute = 
defaults.enableServerlessCompute; + this.health = defaults.health; this.id = defaults.id; this.instanceProfileArn = defaults.instanceProfileArn; this.jdbcUrl = defaults.jdbcUrl; this.maxNumClusters = defaults.maxNumClusters; this.minNumClusters = defaults.minNumClusters; this.name = defaults.name; + this.numActiveSessions = defaults.numActiveSessions; this.numClusters = defaults.numClusters; this.odbcParams = defaults.odbcParams; this.spotInstancePolicy = defaults.spotInstancePolicy; this.state = defaults.state; this.tags = defaults.tags; + this.warehouseType = defaults.warehouseType; } @CustomType.Setter @@ -271,6 +344,14 @@ public Builder clusterSize(String clusterSize) { return this; } @CustomType.Setter + public Builder creatorName(String creatorName) { + if (creatorName == null) { + throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "creatorName"); + } + this.creatorName = creatorName; + return this; + } + @CustomType.Setter public Builder dataSourceId(String dataSourceId) { if (dataSourceId == null) { throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "dataSourceId"); @@ -295,6 +376,14 @@ public Builder enableServerlessCompute(Boolean enableServerlessCompute) { return this; } @CustomType.Setter + public Builder health(GetSqlWarehouseHealth health) { + if (health == null) { + throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "health"); + } + this.health = health; + return this; + } + @CustomType.Setter public Builder id(String id) { if (id == null) { throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "id"); @@ -343,6 +432,14 @@ public Builder name(String name) { return this; } @CustomType.Setter + public Builder numActiveSessions(Integer numActiveSessions) { + if (numActiveSessions == null) { + throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "numActiveSessions"); + } + this.numActiveSessions = numActiveSessions; + return this; + } + @CustomType.Setter public Builder 
numClusters(Integer numClusters) { if (numClusters == null) { throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "numClusters"); @@ -382,25 +479,37 @@ public Builder tags(GetSqlWarehouseTags tags) { this.tags = tags; return this; } + @CustomType.Setter + public Builder warehouseType(String warehouseType) { + if (warehouseType == null) { + throw new MissingRequiredPropertyException("GetSqlWarehouseResult", "warehouseType"); + } + this.warehouseType = warehouseType; + return this; + } public GetSqlWarehouseResult build() { final var _resultValue = new GetSqlWarehouseResult(); _resultValue.autoStopMins = autoStopMins; _resultValue.channel = channel; _resultValue.clusterSize = clusterSize; + _resultValue.creatorName = creatorName; _resultValue.dataSourceId = dataSourceId; _resultValue.enablePhoton = enablePhoton; _resultValue.enableServerlessCompute = enableServerlessCompute; + _resultValue.health = health; _resultValue.id = id; _resultValue.instanceProfileArn = instanceProfileArn; _resultValue.jdbcUrl = jdbcUrl; _resultValue.maxNumClusters = maxNumClusters; _resultValue.minNumClusters = minNumClusters; _resultValue.name = name; + _resultValue.numActiveSessions = numActiveSessions; _resultValue.numClusters = numClusters; _resultValue.odbcParams = odbcParams; _resultValue.spotInstancePolicy = spotInstancePolicy; _resultValue.state = state; _resultValue.tags = tags; + _resultValue.warehouseType = warehouseType; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTags.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTags.java index ab18492c..8bcfce6d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTags.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTags.java @@ -5,17 +5,17 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.databricks.outputs.GetSqlWarehouseTagsCustomTag; -import 
com.pulumi.exceptions.MissingRequiredPropertyException; import java.util.List; import java.util.Objects; +import javax.annotation.Nullable; @CustomType public final class GetSqlWarehouseTags { - private List customTags; + private @Nullable List customTags; private GetSqlWarehouseTags() {} public List customTags() { - return this.customTags; + return this.customTags == null ? List.of() : this.customTags; } public static Builder builder() { @@ -27,7 +27,7 @@ public static Builder builder(GetSqlWarehouseTags defaults) { } @CustomType.Builder public static final class Builder { - private List customTags; + private @Nullable List customTags; public Builder() {} public Builder(GetSqlWarehouseTags defaults) { Objects.requireNonNull(defaults); @@ -35,10 +35,8 @@ public Builder(GetSqlWarehouseTags defaults) { } @CustomType.Setter - public Builder customTags(List customTags) { - if (customTags == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTags", "customTags"); - } + public Builder customTags(@Nullable List customTags) { + this.customTags = customTags; return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTagsCustomTag.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTagsCustomTag.java index 5e04aa1c..5494f735 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTagsCustomTag.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetSqlWarehouseTagsCustomTag.java @@ -4,21 +4,22 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; @CustomType public final class GetSqlWarehouseTagsCustomTag { - private String key; - private String value; + private @Nullable String key; + private @Nullable String value; private 
GetSqlWarehouseTagsCustomTag() {} - public String key() { - return this.key; + public Optional key() { + return Optional.ofNullable(this.key); } - public String value() { - return this.value; + public Optional value() { + return Optional.ofNullable(this.value); } public static Builder builder() { @@ -30,8 +31,8 @@ public static Builder builder(GetSqlWarehouseTagsCustomTag defaults) { } @CustomType.Builder public static final class Builder { - private String key; - private String value; + private @Nullable String key; + private @Nullable String value; public Builder() {} public Builder(GetSqlWarehouseTagsCustomTag defaults) { Objects.requireNonNull(defaults); @@ -40,18 +41,14 @@ public Builder(GetSqlWarehouseTagsCustomTag defaults) { } @CustomType.Setter - public Builder key(String key) { - if (key == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsCustomTag", "key"); - } + public Builder key(@Nullable String key) { + this.key = key; return this; } @CustomType.Setter - public Builder value(String value) { - if (value == null) { - throw new MissingRequiredPropertyException("GetSqlWarehouseTagsCustomTag", "value"); - } + public Builder value(@Nullable String value) { + this.value = value; return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointChannel.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointChannel.java index 855a6c0b..883eedc2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointChannel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointChannel.java @@ -11,6 +11,7 @@ @CustomType public final class SqlEndpointChannel { + private @Nullable String dbsqlVersion; /** * @return Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. 
* @@ -18,6 +19,9 @@ public final class SqlEndpointChannel { private @Nullable String name; private SqlEndpointChannel() {} + public Optional dbsqlVersion() { + return Optional.ofNullable(this.dbsqlVersion); + } /** * @return Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. * @@ -35,13 +39,21 @@ public static Builder builder(SqlEndpointChannel defaults) { } @CustomType.Builder public static final class Builder { + private @Nullable String dbsqlVersion; private @Nullable String name; public Builder() {} public Builder(SqlEndpointChannel defaults) { Objects.requireNonNull(defaults); + this.dbsqlVersion = defaults.dbsqlVersion; this.name = defaults.name; } + @CustomType.Setter + public Builder dbsqlVersion(@Nullable String dbsqlVersion) { + + this.dbsqlVersion = dbsqlVersion; + return this; + } @CustomType.Setter public Builder name(@Nullable String name) { @@ -50,6 +62,7 @@ public Builder name(@Nullable String name) { } public SqlEndpointChannel build() { final var _resultValue = new SqlEndpointChannel(); + _resultValue.dbsqlVersion = dbsqlVersion; _resultValue.name = name; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealth.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealth.java new file mode 100644 index 00000000..dbcc5a48 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealth.java @@ -0,0 +1,102 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.databricks.outputs.SqlEndpointHealthFailureReason; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class SqlEndpointHealth { + private @Nullable String details; + private @Nullable SqlEndpointHealthFailureReason failureReason; + private @Nullable String message; + private @Nullable String status; + private @Nullable String summary; + + private SqlEndpointHealth() {} + public Optional details() { + return Optional.ofNullable(this.details); + } + public Optional failureReason() { + return Optional.ofNullable(this.failureReason); + } + public Optional message() { + return Optional.ofNullable(this.message); + } + public Optional status() { + return Optional.ofNullable(this.status); + } + public Optional summary() { + return Optional.ofNullable(this.summary); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SqlEndpointHealth defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String details; + private @Nullable SqlEndpointHealthFailureReason failureReason; + private @Nullable String message; + private @Nullable String status; + private @Nullable String summary; + public Builder() {} + public Builder(SqlEndpointHealth defaults) { + Objects.requireNonNull(defaults); + this.details = defaults.details; + this.failureReason = defaults.failureReason; + this.message = defaults.message; + this.status = defaults.status; + this.summary = defaults.summary; + } + + @CustomType.Setter + public Builder details(@Nullable String details) { + + this.details = details; + return this; + } + @CustomType.Setter + public Builder failureReason(@Nullable SqlEndpointHealthFailureReason failureReason) { + + this.failureReason = failureReason; + return this; + 
} + @CustomType.Setter + public Builder message(@Nullable String message) { + + this.message = message; + return this; + } + @CustomType.Setter + public Builder status(@Nullable String status) { + + this.status = status; + return this; + } + @CustomType.Setter + public Builder summary(@Nullable String summary) { + + this.summary = summary; + return this; + } + public SqlEndpointHealth build() { + final var _resultValue = new SqlEndpointHealth(); + _resultValue.details = details; + _resultValue.failureReason = failureReason; + _resultValue.message = message; + _resultValue.status = status; + _resultValue.summary = summary; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealthFailureReason.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealthFailureReason.java new file mode 100644 index 00000000..f3ca9b8e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointHealthFailureReason.java @@ -0,0 +1,77 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class SqlEndpointHealthFailureReason { + private @Nullable String code; + private @Nullable Map parameters; + private @Nullable String type; + + private SqlEndpointHealthFailureReason() {} + public Optional code() { + return Optional.ofNullable(this.code); + } + public Map parameters() { + return this.parameters == null ? 
Map.of() : this.parameters; + } + public Optional type() { + return Optional.ofNullable(this.type); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SqlEndpointHealthFailureReason defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String code; + private @Nullable Map parameters; + private @Nullable String type; + public Builder() {} + public Builder(SqlEndpointHealthFailureReason defaults) { + Objects.requireNonNull(defaults); + this.code = defaults.code; + this.parameters = defaults.parameters; + this.type = defaults.type; + } + + @CustomType.Setter + public Builder code(@Nullable String code) { + + this.code = code; + return this; + } + @CustomType.Setter + public Builder parameters(@Nullable Map parameters) { + + this.parameters = parameters; + return this; + } + @CustomType.Setter + public Builder type(@Nullable String type) { + + this.type = type; + return this; + } + public SqlEndpointHealthFailureReason build() { + final var _resultValue = new SqlEndpointHealthFailureReason(); + _resultValue.code = code; + _resultValue.parameters = parameters; + _resultValue.type = type; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointOdbcParams.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointOdbcParams.java index 424cb3fa..e4b18160 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointOdbcParams.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointOdbcParams.java @@ -4,7 +4,6 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.Integer; import java.lang.String; import java.util.Objects; @@ -13,27 +12,23 @@ @CustomType public final class SqlEndpointOdbcParams { - private @Nullable String host; private 
@Nullable String hostname; - private String path; - private Integer port; - private String protocol; + private @Nullable String path; + private @Nullable Integer port; + private @Nullable String protocol; private SqlEndpointOdbcParams() {} - public Optional host() { - return Optional.ofNullable(this.host); - } public Optional hostname() { return Optional.ofNullable(this.hostname); } - public String path() { - return this.path; + public Optional path() { + return Optional.ofNullable(this.path); } - public Integer port() { - return this.port; + public Optional port() { + return Optional.ofNullable(this.port); } - public String protocol() { - return this.protocol; + public Optional protocol() { + return Optional.ofNullable(this.protocol); } public static Builder builder() { @@ -45,27 +40,19 @@ public static Builder builder(SqlEndpointOdbcParams defaults) { } @CustomType.Builder public static final class Builder { - private @Nullable String host; private @Nullable String hostname; - private String path; - private Integer port; - private String protocol; + private @Nullable String path; + private @Nullable Integer port; + private @Nullable String protocol; public Builder() {} public Builder(SqlEndpointOdbcParams defaults) { Objects.requireNonNull(defaults); - this.host = defaults.host; this.hostname = defaults.hostname; this.path = defaults.path; this.port = defaults.port; this.protocol = defaults.protocol; } - @CustomType.Setter - public Builder host(@Nullable String host) { - - this.host = host; - return this; - } @CustomType.Setter public Builder hostname(@Nullable String hostname) { @@ -73,32 +60,25 @@ public Builder hostname(@Nullable String hostname) { return this; } @CustomType.Setter - public Builder path(String path) { - if (path == null) { - throw new MissingRequiredPropertyException("SqlEndpointOdbcParams", "path"); - } + public Builder path(@Nullable String path) { + this.path = path; return this; } @CustomType.Setter - public Builder port(Integer port) { - 
if (port == null) { - throw new MissingRequiredPropertyException("SqlEndpointOdbcParams", "port"); - } + public Builder port(@Nullable Integer port) { + this.port = port; return this; } @CustomType.Setter - public Builder protocol(String protocol) { - if (protocol == null) { - throw new MissingRequiredPropertyException("SqlEndpointOdbcParams", "protocol"); - } + public Builder protocol(@Nullable String protocol) { + this.protocol = protocol; return this; } public SqlEndpointOdbcParams build() { final var _resultValue = new SqlEndpointOdbcParams(); - _resultValue.host = host; _resultValue.hostname = hostname; _resultValue.path = path; _resultValue.port = port; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointTags.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointTags.java index a8f7a07f..da389852 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointTags.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlEndpointTags.java @@ -5,17 +5,17 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.databricks.outputs.SqlEndpointTagsCustomTag; -import com.pulumi.exceptions.MissingRequiredPropertyException; import java.util.List; import java.util.Objects; +import javax.annotation.Nullable; @CustomType public final class SqlEndpointTags { - private List customTags; + private @Nullable List customTags; private SqlEndpointTags() {} public List customTags() { - return this.customTags; + return this.customTags == null ? 
List.of() : this.customTags; } public static Builder builder() { @@ -27,7 +27,7 @@ public static Builder builder(SqlEndpointTags defaults) { } @CustomType.Builder public static final class Builder { - private List customTags; + private @Nullable List customTags; public Builder() {} public Builder(SqlEndpointTags defaults) { Objects.requireNonNull(defaults); @@ -35,10 +35,8 @@ public Builder(SqlEndpointTags defaults) { } @CustomType.Setter - public Builder customTags(List customTags) { - if (customTags == null) { - throw new MissingRequiredPropertyException("SqlEndpointTags", "customTags"); - } + public Builder customTags(@Nullable List customTags) { + this.customTags = customTags; return this; } diff --git a/sdk/nodejs/accessControlRuleSet.ts b/sdk/nodejs/accessControlRuleSet.ts index 723d6cbf..0c3defe9 100644 --- a/sdk/nodejs/accessControlRuleSet.ts +++ b/sdk/nodejs/accessControlRuleSet.ts @@ -7,6 +7,8 @@ import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. * * > **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`. diff --git a/sdk/nodejs/connection.ts b/sdk/nodejs/connection.ts index cd24fd90..52673509 100644 --- a/sdk/nodejs/connection.ts +++ b/sdk/nodejs/connection.ts @@ -5,6 +5,8 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. 
To make a dataset available for read-only querying using Lakehouse Federation, you create the following: * * - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. diff --git a/sdk/nodejs/defaultNamespaceSetting.ts b/sdk/nodejs/defaultNamespaceSetting.ts index 681f063f..0382def2 100644 --- a/sdk/nodejs/defaultNamespaceSetting.ts +++ b/sdk/nodejs/defaultNamespaceSetting.ts @@ -7,6 +7,8 @@ import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * The `databricks.DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. * Setting the default catalog for the workspace determines the catalog that is used when queries do not reference * a fully qualified 3 level name. For example, if the default catalog is set to 'retail_prod' then a query diff --git a/sdk/nodejs/directory.ts b/sdk/nodejs/directory.ts index 357ed845..89eda2f9 100644 --- a/sdk/nodejs/directory.ts +++ b/sdk/nodejs/directory.ts @@ -50,6 +50,10 @@ export class Directory extends pulumi.CustomResource { * The absolute path of the directory, beginning with "/", e.g. "/Demo". */ public readonly path!: pulumi.Output; + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + */ + public /*out*/ readonly workspacePath!: pulumi.Output; /** * Create a Directory resource with the given unique name, arguments, and options. @@ -67,6 +71,7 @@ export class Directory extends pulumi.CustomResource { resourceInputs["deleteRecursive"] = state ? state.deleteRecursive : undefined; resourceInputs["objectId"] = state ? state.objectId : undefined; resourceInputs["path"] = state ? state.path : undefined; + resourceInputs["workspacePath"] = state ? 
state.workspacePath : undefined; } else { const args = argsOrState as DirectoryArgs | undefined; if ((!args || args.path === undefined) && !opts.urn) { @@ -75,6 +80,7 @@ export class Directory extends pulumi.CustomResource { resourceInputs["deleteRecursive"] = args ? args.deleteRecursive : undefined; resourceInputs["objectId"] = args ? args.objectId : undefined; resourceInputs["path"] = args ? args.path : undefined; + resourceInputs["workspacePath"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(Directory.__pulumiType, name, resourceInputs, opts); @@ -94,6 +100,10 @@ export interface DirectoryState { * The absolute path of the directory, beginning with "/", e.g. "/Demo". */ path?: pulumi.Input; + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + */ + workspacePath?: pulumi.Input; } /** diff --git a/sdk/nodejs/externalLocation.ts b/sdk/nodejs/externalLocation.ts index 60435685..7cae0e98 100644 --- a/sdk/nodejs/externalLocation.ts +++ b/sdk/nodejs/externalLocation.ts @@ -7,6 +7,8 @@ import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: * * - databricks.StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. diff --git a/sdk/nodejs/getCurrentMetastore.ts b/sdk/nodejs/getCurrentMetastore.ts new file mode 100644 index 00000000..96d24f6b --- /dev/null +++ b/sdk/nodejs/getCurrentMetastore.ts @@ -0,0 +1,116 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "./types/input"; +import * as outputs from "./types/output"; +import * as utilities from "./utilities"; + +/** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add dependsOn attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const this = databricks.getCurrentMetastore({}); + * export const someMetastore = data.databricks_metastore["this"].metastore_info[0]; + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. + */ +export function getCurrentMetastore(args?: GetCurrentMetastoreArgs, opts?: pulumi.InvokeOptions): Promise { + args = args || {}; + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("databricks:index/getCurrentMetastore:getCurrentMetastore", { + "id": args.id, + "metastoreInfo": args.metastoreInfo, + }, opts); +} + +/** + * A collection of arguments for invoking getCurrentMetastore. + */ +export interface GetCurrentMetastoreArgs { + /** + * metastore ID. 
+ */ + id?: string; + /** + * summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + */ + metastoreInfo?: inputs.GetCurrentMetastoreMetastoreInfo; +} + +/** + * A collection of values returned by getCurrentMetastore. + */ +export interface GetCurrentMetastoreResult { + /** + * metastore ID. + */ + readonly id: string; + /** + * summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + */ + readonly metastoreInfo: outputs.GetCurrentMetastoreMetastoreInfo; +} +/** + * Retrieves information about metastore attached to a given workspace. + * + * > **Note** This is the workspace-level data source. + * + * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add dependsOn attribute to prevent _authentication is not configured for provider_ errors. + * + * ## Example Usage + * + * MetastoreSummary response for a metastore attached to the current workspace. + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const this = databricks.getCurrentMetastore({}); + * export const someMetastore = data.databricks_metastore["this"].metastore_info[0]; + * ``` + * ## Related Resources + * + * The following resources are used in the same context: + * + * * databricks.Metastore to get information for a metastore with a given ID. + * * databricks.getMetastores to get a mapping of name to id of all metastores. + * * databricks.Metastore to manage Metastores within Unity Catalog. + * * databricks.Catalog to manage catalogs within Unity Catalog. 
+ */ +export function getCurrentMetastoreOutput(args?: GetCurrentMetastoreOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getCurrentMetastore(a, opts)) +} + +/** + * A collection of arguments for invoking getCurrentMetastore. + */ +export interface GetCurrentMetastoreOutputArgs { + /** + * metastore ID. + */ + id?: pulumi.Input; + /** + * summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + */ + metastoreInfo?: pulumi.Input; +} diff --git a/sdk/nodejs/getDirectory.ts b/sdk/nodejs/getDirectory.ts index 21b8fbf3..f63da88b 100644 --- a/sdk/nodejs/getDirectory.ts +++ b/sdk/nodejs/getDirectory.ts @@ -56,6 +56,10 @@ export interface GetDirectoryResult { */ readonly objectId: number; readonly path: string; + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + */ + readonly workspacePath: string; } /** * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add dependsOn attribute in order to prevent _default auth: cannot configure default credentials_ errors. diff --git a/sdk/nodejs/getServicePrincipal.ts b/sdk/nodejs/getServicePrincipal.ts index b8a54ca5..41a6ff7d 100644 --- a/sdk/nodejs/getServicePrincipal.ts +++ b/sdk/nodejs/getServicePrincipal.ts @@ -75,7 +75,7 @@ export interface GetServicePrincipalArgs { */ applicationId?: string; /** - * Display name of the service principal, e.g. `Foo SPN`. + * Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. 
*/ displayName?: string; /** @@ -190,7 +190,7 @@ export interface GetServicePrincipalOutputArgs { */ applicationId?: pulumi.Input; /** - * Display name of the service principal, e.g. `Foo SPN`. + * Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. */ displayName?: pulumi.Input; /** diff --git a/sdk/nodejs/getSqlWarehouse.ts b/sdk/nodejs/getSqlWarehouse.ts index 9cdf28e8..ce07474c 100644 --- a/sdk/nodejs/getSqlWarehouse.ts +++ b/sdk/nodejs/getSqlWarehouse.ts @@ -53,20 +53,24 @@ export function getSqlWarehouse(args?: GetSqlWarehouseArgs, opts?: pulumi.Invoke "autoStopMins": args.autoStopMins, "channel": args.channel, "clusterSize": args.clusterSize, + "creatorName": args.creatorName, "dataSourceId": args.dataSourceId, "enablePhoton": args.enablePhoton, "enableServerlessCompute": args.enableServerlessCompute, + "health": args.health, "id": args.id, "instanceProfileArn": args.instanceProfileArn, "jdbcUrl": args.jdbcUrl, "maxNumClusters": args.maxNumClusters, "minNumClusters": args.minNumClusters, "name": args.name, + "numActiveSessions": args.numActiveSessions, "numClusters": args.numClusters, "odbcParams": args.odbcParams, "spotInstancePolicy": args.spotInstancePolicy, "state": args.state, "tags": args.tags, + "warehouseType": args.warehouseType, }, opts); } @@ -86,6 +90,10 @@ export interface GetSqlWarehouseArgs { * The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". */ clusterSize?: string; + /** + * The username of the user who created the endpoint. + */ + creatorName?: string; /** * ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. */ @@ -98,6 +106,10 @@ export interface GetSqlWarehouseArgs { * Whether this SQL warehouse is a serverless SQL warehouse. 
*/ enableServerlessCompute?: boolean; + /** + * Health status of the endpoint. + */ + health?: inputs.GetSqlWarehouseHealth; /** * The ID of the SQL warehouse. */ @@ -119,6 +131,13 @@ export interface GetSqlWarehouseArgs { * Name of the SQL warehouse to search (case-sensitive). */ name?: string; + /** + * The current number of clusters used by the endpoint. + */ + numActiveSessions?: number; + /** + * The current number of clusters used by the endpoint. + */ numClusters?: number; /** * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -128,11 +147,18 @@ export interface GetSqlWarehouseArgs { * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. */ spotInstancePolicy?: string; + /** + * The current state of the endpoint. + */ state?: string; /** * tags used for SQL warehouse resources. */ tags?: inputs.GetSqlWarehouseTags; + /** + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + */ + warehouseType?: string; } /** @@ -151,6 +177,10 @@ export interface GetSqlWarehouseResult { * The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". */ readonly clusterSize: string; + /** + * The username of the user who created the endpoint. + */ + readonly creatorName: string; /** * ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. */ @@ -163,6 +193,10 @@ export interface GetSqlWarehouseResult { * Whether this SQL warehouse is a serverless SQL warehouse. */ readonly enableServerlessCompute: boolean; + /** + * Health status of the endpoint. + */ + readonly health: outputs.GetSqlWarehouseHealth; /** * The ID of the SQL warehouse. 
*/ @@ -184,6 +218,13 @@ export interface GetSqlWarehouseResult { * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. */ readonly name: string; + /** + * The current number of clusters used by the endpoint. + */ + readonly numActiveSessions: number; + /** + * The current number of clusters used by the endpoint. + */ readonly numClusters: number; /** * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -193,11 +234,18 @@ export interface GetSqlWarehouseResult { * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. */ readonly spotInstancePolicy: string; + /** + * The current state of the endpoint. + */ readonly state: string; /** * tags used for SQL warehouse resources. */ readonly tags: outputs.GetSqlWarehouseTags; + /** + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + */ + readonly warehouseType: string; } /** * > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add dependsOn attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -258,6 +306,10 @@ export interface GetSqlWarehouseOutputArgs { * The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". */ clusterSize?: pulumi.Input; + /** + * The username of the user who created the endpoint. + */ + creatorName?: pulumi.Input; /** * ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. */ @@ -270,6 +322,10 @@ export interface GetSqlWarehouseOutputArgs { * Whether this SQL warehouse is a serverless SQL warehouse. 
*/ enableServerlessCompute?: pulumi.Input; + /** + * Health status of the endpoint. + */ + health?: pulumi.Input; /** * The ID of the SQL warehouse. */ @@ -291,6 +347,13 @@ export interface GetSqlWarehouseOutputArgs { * Name of the SQL warehouse to search (case-sensitive). */ name?: pulumi.Input; + /** + * The current number of clusters used by the endpoint. + */ + numActiveSessions?: pulumi.Input; + /** + * The current number of clusters used by the endpoint. + */ numClusters?: pulumi.Input; /** * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -300,9 +363,16 @@ export interface GetSqlWarehouseOutputArgs { * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. */ spotInstancePolicy?: pulumi.Input; + /** + * The current state of the endpoint. + */ state?: pulumi.Input; /** * tags used for SQL warehouse resources. */ tags?: pulumi.Input; + /** + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). + */ + warehouseType?: pulumi.Input; } diff --git a/sdk/nodejs/grant.ts b/sdk/nodejs/grant.ts new file mode 100644 index 00000000..125ab2c1 --- /dev/null +++ b/sdk/nodejs/grant.ts @@ -0,0 +1,148 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "./utilities"; + +export class Grant extends pulumi.CustomResource { + /** + * Get an existing Grant resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. 
+ * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: GrantState, opts?: pulumi.CustomResourceOptions): Grant { + return new Grant(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'databricks:index/grant:Grant'; + + /** + * Returns true if the given object is an instance of Grant. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is Grant { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === Grant.__pulumiType; + } + + public readonly catalog!: pulumi.Output; + public readonly externalLocation!: pulumi.Output; + public readonly foreignConnection!: pulumi.Output; + public readonly function!: pulumi.Output; + public readonly metastore!: pulumi.Output; + public readonly model!: pulumi.Output; + public readonly pipeline!: pulumi.Output; + public readonly principal!: pulumi.Output; + public readonly privileges!: pulumi.Output; + public readonly recipient!: pulumi.Output; + public readonly schema!: pulumi.Output; + public readonly share!: pulumi.Output; + public readonly storageCredential!: pulumi.Output; + public readonly table!: pulumi.Output; + public readonly volume!: pulumi.Output; + + /** + * Create a Grant resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. 
+ */ + constructor(name: string, args: GrantArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: GrantArgs | GrantState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as GrantState | undefined; + resourceInputs["catalog"] = state ? state.catalog : undefined; + resourceInputs["externalLocation"] = state ? state.externalLocation : undefined; + resourceInputs["foreignConnection"] = state ? state.foreignConnection : undefined; + resourceInputs["function"] = state ? state.function : undefined; + resourceInputs["metastore"] = state ? state.metastore : undefined; + resourceInputs["model"] = state ? state.model : undefined; + resourceInputs["pipeline"] = state ? state.pipeline : undefined; + resourceInputs["principal"] = state ? state.principal : undefined; + resourceInputs["privileges"] = state ? state.privileges : undefined; + resourceInputs["recipient"] = state ? state.recipient : undefined; + resourceInputs["schema"] = state ? state.schema : undefined; + resourceInputs["share"] = state ? state.share : undefined; + resourceInputs["storageCredential"] = state ? state.storageCredential : undefined; + resourceInputs["table"] = state ? state.table : undefined; + resourceInputs["volume"] = state ? state.volume : undefined; + } else { + const args = argsOrState as GrantArgs | undefined; + if ((!args || args.principal === undefined) && !opts.urn) { + throw new Error("Missing required property 'principal'"); + } + if ((!args || args.privileges === undefined) && !opts.urn) { + throw new Error("Missing required property 'privileges'"); + } + resourceInputs["catalog"] = args ? args.catalog : undefined; + resourceInputs["externalLocation"] = args ? args.externalLocation : undefined; + resourceInputs["foreignConnection"] = args ? args.foreignConnection : undefined; + resourceInputs["function"] = args ? 
args.function : undefined; + resourceInputs["metastore"] = args ? args.metastore : undefined; + resourceInputs["model"] = args ? args.model : undefined; + resourceInputs["pipeline"] = args ? args.pipeline : undefined; + resourceInputs["principal"] = args ? args.principal : undefined; + resourceInputs["privileges"] = args ? args.privileges : undefined; + resourceInputs["recipient"] = args ? args.recipient : undefined; + resourceInputs["schema"] = args ? args.schema : undefined; + resourceInputs["share"] = args ? args.share : undefined; + resourceInputs["storageCredential"] = args ? args.storageCredential : undefined; + resourceInputs["table"] = args ? args.table : undefined; + resourceInputs["volume"] = args ? args.volume : undefined; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + super(Grant.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering Grant resources. + */ +export interface GrantState { + catalog?: pulumi.Input; + externalLocation?: pulumi.Input; + foreignConnection?: pulumi.Input; + function?: pulumi.Input; + metastore?: pulumi.Input; + model?: pulumi.Input; + pipeline?: pulumi.Input; + principal?: pulumi.Input; + privileges?: pulumi.Input[]>; + recipient?: pulumi.Input; + schema?: pulumi.Input; + share?: pulumi.Input; + storageCredential?: pulumi.Input; + table?: pulumi.Input; + volume?: pulumi.Input; +} + +/** + * The set of arguments for constructing a Grant resource. 
+ */ +export interface GrantArgs { + catalog?: pulumi.Input; + externalLocation?: pulumi.Input; + foreignConnection?: pulumi.Input; + function?: pulumi.Input; + metastore?: pulumi.Input; + model?: pulumi.Input; + pipeline?: pulumi.Input; + principal: pulumi.Input; + privileges: pulumi.Input[]>; + recipient?: pulumi.Input; + schema?: pulumi.Input; + share?: pulumi.Input; + storageCredential?: pulumi.Input; + table?: pulumi.Input; + volume?: pulumi.Input; +} diff --git a/sdk/nodejs/index.ts b/sdk/nodejs/index.ts index 693766fd..8f5c9c31 100644 --- a/sdk/nodejs/index.ts +++ b/sdk/nodejs/index.ts @@ -105,6 +105,11 @@ export const getCurrentConfig: typeof import("./getCurrentConfig").getCurrentCon export const getCurrentConfigOutput: typeof import("./getCurrentConfig").getCurrentConfigOutput = null as any; utilities.lazyLoad(exports, ["getCurrentConfig","getCurrentConfigOutput"], () => require("./getCurrentConfig")); +export { GetCurrentMetastoreArgs, GetCurrentMetastoreResult, GetCurrentMetastoreOutputArgs } from "./getCurrentMetastore"; +export const getCurrentMetastore: typeof import("./getCurrentMetastore").getCurrentMetastore = null as any; +export const getCurrentMetastoreOutput: typeof import("./getCurrentMetastore").getCurrentMetastoreOutput = null as any; +utilities.lazyLoad(exports, ["getCurrentMetastore","getCurrentMetastoreOutput"], () => require("./getCurrentMetastore")); + export { GetCurrentUserResult } from "./getCurrentUser"; export const getCurrentUser: typeof import("./getCurrentUser").getCurrentUser = null as any; export const getCurrentUserOutput: typeof import("./getCurrentUser").getCurrentUserOutput = null as any; @@ -265,6 +270,11 @@ export type GlobalInitScript = import("./globalInitScript").GlobalInitScript; export const GlobalInitScript: typeof import("./globalInitScript").GlobalInitScript = null as any; utilities.lazyLoad(exports, ["GlobalInitScript"], () => require("./globalInitScript")); +export { GrantArgs, GrantState } from "./grant"; 
+export type Grant = import("./grant").Grant; +export const Grant: typeof import("./grant").Grant = null as any; +utilities.lazyLoad(exports, ["Grant"], () => require("./grant")); + export { GrantsArgs, GrantsState } from "./grants"; export type Grants = import("./grants").Grants; export const Grants: typeof import("./grants").Grants = null as any; @@ -627,6 +637,8 @@ const _module = { return new GitCredential(name, undefined, { urn }) case "databricks:index/globalInitScript:GlobalInitScript": return new GlobalInitScript(name, undefined, { urn }) + case "databricks:index/grant:Grant": + return new Grant(name, undefined, { urn }) case "databricks:index/grants:Grants": return new Grants(name, undefined, { urn }) case "databricks:index/group:Group": @@ -772,6 +784,7 @@ pulumi.runtime.registerResourceModule("databricks", "index/entitlements", _modul pulumi.runtime.registerResourceModule("databricks", "index/externalLocation", _module) pulumi.runtime.registerResourceModule("databricks", "index/gitCredential", _module) pulumi.runtime.registerResourceModule("databricks", "index/globalInitScript", _module) +pulumi.runtime.registerResourceModule("databricks", "index/grant", _module) pulumi.runtime.registerResourceModule("databricks", "index/grants", _module) pulumi.runtime.registerResourceModule("databricks", "index/group", _module) pulumi.runtime.registerResourceModule("databricks", "index/groupInstanceProfile", _module) diff --git a/sdk/nodejs/metastore.ts b/sdk/nodejs/metastore.ts index cb2b5f62..7235ed04 100644 --- a/sdk/nodejs/metastore.ts +++ b/sdk/nodejs/metastore.ts @@ -5,6 +5,8 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. 
Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. * * Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). diff --git a/sdk/nodejs/metastoreAssignment.ts b/sdk/nodejs/metastoreAssignment.ts index b5b3ebe8..0e1169f3 100644 --- a/sdk/nodejs/metastoreAssignment.ts +++ b/sdk/nodejs/metastoreAssignment.ts @@ -5,6 +5,8 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with account-level provider! + * * A single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. * * ## Example Usage diff --git a/sdk/nodejs/metastoreDataAccess.ts b/sdk/nodejs/metastoreDataAccess.ts index 58d5c059..74f84df5 100644 --- a/sdk/nodejs/metastoreDataAccess.ts +++ b/sdk/nodejs/metastoreDataAccess.ts @@ -7,6 +7,8 @@ import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * Optionally, each databricks.Metastore can have a default databricks.StorageCredential defined as `databricks.MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined. * * ## Import @@ -61,6 +63,7 @@ export class MetastoreDataAccess extends pulumi.CustomResource { public readonly name!: pulumi.Output; public readonly owner!: pulumi.Output; public readonly readOnly!: pulumi.Output; + public readonly skipValidation!: pulumi.Output; /** * Create a MetastoreDataAccess resource with the given unique name, arguments, and options. 
@@ -88,6 +91,7 @@ export class MetastoreDataAccess extends pulumi.CustomResource { resourceInputs["name"] = state ? state.name : undefined; resourceInputs["owner"] = state ? state.owner : undefined; resourceInputs["readOnly"] = state ? state.readOnly : undefined; + resourceInputs["skipValidation"] = state ? state.skipValidation : undefined; } else { const args = argsOrState as MetastoreDataAccessArgs | undefined; resourceInputs["awsIamRole"] = args ? args.awsIamRole : undefined; @@ -103,6 +107,7 @@ export class MetastoreDataAccess extends pulumi.CustomResource { resourceInputs["name"] = args ? args.name : undefined; resourceInputs["owner"] = args ? args.owner : undefined; resourceInputs["readOnly"] = args ? args.readOnly : undefined; + resourceInputs["skipValidation"] = args ? args.skipValidation : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(MetastoreDataAccess.__pulumiType, name, resourceInputs, opts); @@ -129,6 +134,7 @@ export interface MetastoreDataAccessState { name?: pulumi.Input; owner?: pulumi.Input; readOnly?: pulumi.Input; + skipValidation?: pulumi.Input; } /** @@ -151,4 +157,5 @@ export interface MetastoreDataAccessArgs { name?: pulumi.Input; owner?: pulumi.Input; readOnly?: pulumi.Input; + skipValidation?: pulumi.Input; } diff --git a/sdk/nodejs/metastoreProvider.ts b/sdk/nodejs/metastoreProvider.ts index 09ed41ac..70e57fab 100644 --- a/sdk/nodejs/metastoreProvider.ts +++ b/sdk/nodejs/metastoreProvider.ts @@ -5,6 +5,8 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. * * A `databricks.MetastoreProvider` is contained within databricks.Metastore and can contain a list of shares that have been shared with you. 
diff --git a/sdk/nodejs/recipient.ts b/sdk/nodejs/recipient.ts index c304cac4..e68cfc59 100644 --- a/sdk/nodejs/recipient.ts +++ b/sdk/nodejs/recipient.ts @@ -7,6 +7,8 @@ import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Within a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to. * * A `databricks.Recipient` is contained within databricks.Metastore and can have permissions to `SELECT` from a list of shares. diff --git a/sdk/nodejs/registeredModel.ts b/sdk/nodejs/registeredModel.ts index 1a1fa987..74290614 100644 --- a/sdk/nodejs/registeredModel.ts +++ b/sdk/nodejs/registeredModel.ts @@ -5,6 +5,8 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. * * ## Example Usage diff --git a/sdk/nodejs/repo.ts b/sdk/nodejs/repo.ts index fe6ea4b7..0d515fdb 100644 --- a/sdk/nodejs/repo.ts +++ b/sdk/nodejs/repo.ts @@ -68,6 +68,10 @@ export class Repo extends pulumi.CustomResource { * The URL of the Git Repository to clone from. If the value changes, repo is re-created. */ public readonly url!: pulumi.Output; + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + */ + public /*out*/ readonly workspacePath!: pulumi.Output; /** * Create a Repo resource with the given unique name, arguments, and options. @@ -89,6 +93,7 @@ export class Repo extends pulumi.CustomResource { resourceInputs["sparseCheckout"] = state ? state.sparseCheckout : undefined; resourceInputs["tag"] = state ? state.tag : undefined; resourceInputs["url"] = state ? state.url : undefined; + resourceInputs["workspacePath"] = state ? 
state.workspacePath : undefined; } else { const args = argsOrState as RepoArgs | undefined; if ((!args || args.url === undefined) && !opts.urn) { @@ -101,6 +106,7 @@ export class Repo extends pulumi.CustomResource { resourceInputs["sparseCheckout"] = args ? args.sparseCheckout : undefined; resourceInputs["tag"] = args ? args.tag : undefined; resourceInputs["url"] = args ? args.url : undefined; + resourceInputs["workspacePath"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(Repo.__pulumiType, name, resourceInputs, opts); @@ -136,6 +142,10 @@ export interface RepoState { * The URL of the Git Repository to clone from. If the value changes, repo is re-created. */ url?: pulumi.Input; + /** + * path on Workspace File System (WSFS) in form of `/Workspace` + `path` + */ + workspacePath?: pulumi.Input; } /** diff --git a/sdk/nodejs/schema.ts b/sdk/nodejs/schema.ts index 9b9a7efc..40cd6bc7 100644 --- a/sdk/nodejs/schema.ts +++ b/sdk/nodejs/schema.ts @@ -5,6 +5,8 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be only used with workspace-level provider! + * * Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. * * A `databricks.Schema` is contained within databricks.Catalog and can contain tables & views. diff --git a/sdk/nodejs/sqlEndpoint.ts b/sdk/nodejs/sqlEndpoint.ts index ff5ee416..bad4d7d4 100644 --- a/sdk/nodejs/sqlEndpoint.ts +++ b/sdk/nodejs/sqlEndpoint.ts @@ -90,6 +90,10 @@ export class SqlEndpoint extends pulumi.CustomResource { * The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". */ public readonly clusterSize!: pulumi.Output; + /** + * The username of the user who created the endpoint. 
+ */ + public /*out*/ readonly creatorName!: pulumi.Output; /** * ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. */ @@ -106,11 +110,15 @@ export class SqlEndpoint extends pulumi.CustomResource { * - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). */ public readonly enableServerlessCompute!: pulumi.Output; + /** + * Health status of the endpoint. + */ + public /*out*/ readonly healths!: pulumi.Output; public readonly instanceProfileArn!: pulumi.Output; /** * JDBC connection string. */ - public readonly jdbcUrl!: pulumi.Output; + public /*out*/ readonly jdbcUrl!: pulumi.Output; /** * Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. */ @@ -123,22 +131,32 @@ export class SqlEndpoint extends pulumi.CustomResource { * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. */ public readonly name!: pulumi.Output; - public readonly numClusters!: pulumi.Output; + /** + * The current number of clusters used by the endpoint. + */ + public /*out*/ readonly numActiveSessions!: pulumi.Output; + /** + * The current number of clusters used by the endpoint. 
+ */ + public /*out*/ readonly numClusters!: pulumi.Output; /** * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. */ - public readonly odbcParams!: pulumi.Output; + public /*out*/ readonly odbcParams!: pulumi.Output; /** * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. */ public readonly spotInstancePolicy!: pulumi.Output; - public readonly state!: pulumi.Output; + /** + * The current state of the endpoint. + */ + public /*out*/ readonly state!: pulumi.Output; /** * Databricks tags all endpoint resources with these tags. */ public readonly tags!: pulumi.Output; /** - * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
*/ public readonly warehouseType!: pulumi.Output; @@ -158,14 +176,17 @@ export class SqlEndpoint extends pulumi.CustomResource { resourceInputs["autoStopMins"] = state ? state.autoStopMins : undefined; resourceInputs["channel"] = state ? state.channel : undefined; resourceInputs["clusterSize"] = state ? state.clusterSize : undefined; + resourceInputs["creatorName"] = state ? state.creatorName : undefined; resourceInputs["dataSourceId"] = state ? state.dataSourceId : undefined; resourceInputs["enablePhoton"] = state ? state.enablePhoton : undefined; resourceInputs["enableServerlessCompute"] = state ? state.enableServerlessCompute : undefined; + resourceInputs["healths"] = state ? state.healths : undefined; resourceInputs["instanceProfileArn"] = state ? state.instanceProfileArn : undefined; resourceInputs["jdbcUrl"] = state ? state.jdbcUrl : undefined; resourceInputs["maxNumClusters"] = state ? state.maxNumClusters : undefined; resourceInputs["minNumClusters"] = state ? state.minNumClusters : undefined; resourceInputs["name"] = state ? state.name : undefined; + resourceInputs["numActiveSessions"] = state ? state.numActiveSessions : undefined; resourceInputs["numClusters"] = state ? state.numClusters : undefined; resourceInputs["odbcParams"] = state ? state.odbcParams : undefined; resourceInputs["spotInstancePolicy"] = state ? state.spotInstancePolicy : undefined; @@ -184,16 +205,19 @@ export class SqlEndpoint extends pulumi.CustomResource { resourceInputs["enablePhoton"] = args ? args.enablePhoton : undefined; resourceInputs["enableServerlessCompute"] = args ? args.enableServerlessCompute : undefined; resourceInputs["instanceProfileArn"] = args ? args.instanceProfileArn : undefined; - resourceInputs["jdbcUrl"] = args ? args.jdbcUrl : undefined; resourceInputs["maxNumClusters"] = args ? args.maxNumClusters : undefined; resourceInputs["minNumClusters"] = args ? args.minNumClusters : undefined; resourceInputs["name"] = args ? 
args.name : undefined; - resourceInputs["numClusters"] = args ? args.numClusters : undefined; - resourceInputs["odbcParams"] = args ? args.odbcParams : undefined; resourceInputs["spotInstancePolicy"] = args ? args.spotInstancePolicy : undefined; - resourceInputs["state"] = args ? args.state : undefined; resourceInputs["tags"] = args ? args.tags : undefined; resourceInputs["warehouseType"] = args ? args.warehouseType : undefined; + resourceInputs["creatorName"] = undefined /*out*/; + resourceInputs["healths"] = undefined /*out*/; + resourceInputs["jdbcUrl"] = undefined /*out*/; + resourceInputs["numActiveSessions"] = undefined /*out*/; + resourceInputs["numClusters"] = undefined /*out*/; + resourceInputs["odbcParams"] = undefined /*out*/; + resourceInputs["state"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(SqlEndpoint.__pulumiType, name, resourceInputs, opts); @@ -216,6 +240,10 @@ export interface SqlEndpointState { * The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". */ clusterSize?: pulumi.Input; + /** + * The username of the user who created the endpoint. + */ + creatorName?: pulumi.Input; /** * ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. */ @@ -232,6 +260,10 @@ export interface SqlEndpointState { * - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. 
A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). */ enableServerlessCompute?: pulumi.Input; + /** + * Health status of the endpoint. + */ + healths?: pulumi.Input[]>; instanceProfileArn?: pulumi.Input; /** * JDBC connection string. @@ -249,6 +281,13 @@ export interface SqlEndpointState { * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. */ name?: pulumi.Input; + /** + * The current number of clusters used by the endpoint. + */ + numActiveSessions?: pulumi.Input; + /** + * The current number of clusters used by the endpoint. + */ numClusters?: pulumi.Input; /** * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. @@ -258,13 +297,16 @@ export interface SqlEndpointState { * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. */ spotInstancePolicy?: pulumi.Input; + /** + * The current state of the endpoint. + */ state?: pulumi.Input; /** * Databricks tags all endpoint resources with these tags. */ tags?: pulumi.Input; /** - * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
+ * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. */ warehouseType?: pulumi.Input; } @@ -302,10 +344,6 @@ export interface SqlEndpointArgs { */ enableServerlessCompute?: pulumi.Input; instanceProfileArn?: pulumi.Input; - /** - * JDBC connection string. - */ - jdbcUrl?: pulumi.Input; /** * Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. */ @@ -318,22 +356,16 @@ export interface SqlEndpointArgs { * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. */ name?: pulumi.Input; - numClusters?: pulumi.Input; - /** - * ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. - */ - odbcParams?: pulumi.Input; /** * The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. */ spotInstancePolicy?: pulumi.Input; - state?: pulumi.Input; /** * Databricks tags all endpoint resources with these tags. */ tags?: pulumi.Input; /** - * SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + * SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enableServerlessCompute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. */ warehouseType?: pulumi.Input; } diff --git a/sdk/nodejs/storageCredential.ts b/sdk/nodejs/storageCredential.ts index 1fac6f36..c1ca21aa 100644 --- a/sdk/nodejs/storageCredential.ts +++ b/sdk/nodejs/storageCredential.ts @@ -7,6 +7,8 @@ import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** + * > **Note** This resource could be used with account or workspace-level provider. + * * To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: * * - `databricks.StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. 
@@ -137,6 +139,10 @@ export class StorageCredential extends pulumi.CustomResource { * Indicates whether the storage credential is only usable for read operations. */ public readonly readOnly!: pulumi.Output; + /** + * Suppress validation errors if any & force save the storage credential. + */ + public readonly skipValidation!: pulumi.Output; /** * Create a StorageCredential resource with the given unique name, arguments, and options. @@ -163,6 +169,7 @@ export class StorageCredential extends pulumi.CustomResource { resourceInputs["name"] = state ? state.name : undefined; resourceInputs["owner"] = state ? state.owner : undefined; resourceInputs["readOnly"] = state ? state.readOnly : undefined; + resourceInputs["skipValidation"] = state ? state.skipValidation : undefined; } else { const args = argsOrState as StorageCredentialArgs | undefined; resourceInputs["awsIamRole"] = args ? args.awsIamRole : undefined; @@ -177,6 +184,7 @@ export class StorageCredential extends pulumi.CustomResource { resourceInputs["name"] = args ? args.name : undefined; resourceInputs["owner"] = args ? args.owner : undefined; resourceInputs["readOnly"] = args ? args.readOnly : undefined; + resourceInputs["skipValidation"] = args ? args.skipValidation : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(StorageCredential.__pulumiType, name, resourceInputs, opts); @@ -216,6 +224,10 @@ export interface StorageCredentialState { * Indicates whether the storage credential is only usable for read operations. */ readOnly?: pulumi.Input; + /** + * Suppress validation errors if any & force save the storage credential. + */ + skipValidation?: pulumi.Input; } /** @@ -251,4 +263,8 @@ export interface StorageCredentialArgs { * Indicates whether the storage credential is only usable for read operations. */ readOnly?: pulumi.Input; + /** + * Suppress validation errors if any & force save the storage credential. 
+ */ + skipValidation?: pulumi.Input; } diff --git a/sdk/nodejs/systemSchema.ts b/sdk/nodejs/systemSchema.ts index e81f12b5..febbabc1 100644 --- a/sdk/nodejs/systemSchema.ts +++ b/sdk/nodejs/systemSchema.ts @@ -7,8 +7,7 @@ import * as utilities from "./utilities"; /** * > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). * - * > **Notes** - * Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. + * > **Note** This resource could be only used with workspace-level provider! * * Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. * diff --git a/sdk/nodejs/tsconfig.json b/sdk/nodejs/tsconfig.json index 665ee844..953878ac 100644 --- a/sdk/nodejs/tsconfig.json +++ b/sdk/nodejs/tsconfig.json @@ -35,6 +35,7 @@ "getClusterPolicy.ts", "getClusters.ts", "getCurrentConfig.ts", + "getCurrentMetastore.ts", "getCurrentUser.ts", "getDbfsFile.ts", "getDbfsFilePaths.ts", @@ -67,6 +68,7 @@ "getZones.ts", "gitCredential.ts", "globalInitScript.ts", + "grant.ts", "grants.ts", "group.ts", "groupInstanceProfile.ts", diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index 4cce2b4c..75006759 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -945,6 +945,150 @@ export interface GetClusterClusterInfoTerminationReasonArgs { type?: pulumi.Input; } +export interface GetCurrentMetastoreMetastoreInfo { + cloud?: string; + /** + * Timestamp (in milliseconds) when the current metastore was created. + */ + createdAt?: number; + /** + * the ID of the identity that created the current metastore. + */ + createdBy?: string; + /** + * the ID of the default data access configuration. 
+ */ + defaultDataAccessConfigId?: string; + /** + * The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + */ + deltaSharingOrganizationName?: string; + /** + * the expiration duration in seconds on recipient data access tokens. + */ + deltaSharingRecipientTokenLifetimeInSeconds?: number; + /** + * Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + */ + deltaSharingScope?: string; + /** + * Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + */ + globalMetastoreId?: string; + /** + * Metastore ID. + */ + metastoreId?: string; + /** + * Name of metastore. + */ + name?: string; + /** + * Username/group name/sp applicationId of the metastore owner. + */ + owner?: string; + /** + * the version of the privilege model used by the metastore. + */ + privilegeModelVersion?: string; + /** + * (Mandatory for account-level) The region of the metastore. + */ + region?: string; + /** + * Path on cloud storage account, where managed `databricks.Table` are stored. + */ + storageRoot?: string; + /** + * ID of a storage credential used for the `storageRoot`. + */ + storageRootCredentialId?: string; + /** + * Name of a storage credential used for the `storageRoot`. + */ + storageRootCredentialName?: string; + /** + * Timestamp (in milliseconds) when the current metastore was updated. + */ + updatedAt?: number; + /** + * the ID of the identity that updated the current metastore. + */ + updatedBy?: string; +} + +export interface GetCurrentMetastoreMetastoreInfoArgs { + cloud?: pulumi.Input; + /** + * Timestamp (in milliseconds) when the current metastore was created. + */ + createdAt?: pulumi.Input; + /** + * the ID of the identity that created the current metastore. + */ + createdBy?: pulumi.Input; + /** + * the ID of the default data access configuration. 
+ */ + defaultDataAccessConfigId?: pulumi.Input; + /** + * The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + */ + deltaSharingOrganizationName?: pulumi.Input; + /** + * the expiration duration in seconds on recipient data access tokens. + */ + deltaSharingRecipientTokenLifetimeInSeconds?: pulumi.Input; + /** + * Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + */ + deltaSharingScope?: pulumi.Input; + /** + * Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + */ + globalMetastoreId?: pulumi.Input; + /** + * Metastore ID. + */ + metastoreId?: pulumi.Input; + /** + * Name of metastore. + */ + name?: pulumi.Input; + /** + * Username/group name/sp applicationId of the metastore owner. + */ + owner?: pulumi.Input; + /** + * the version of the privilege model used by the metastore. + */ + privilegeModelVersion?: pulumi.Input; + /** + * (Mandatory for account-level) The region of the metastore. + */ + region?: pulumi.Input; + /** + * Path on cloud storage account, where managed `databricks.Table` are stored. + */ + storageRoot?: pulumi.Input; + /** + * ID of a storage credential used for the `storageRoot`. + */ + storageRootCredentialId?: pulumi.Input; + /** + * Name of a storage credential used for the `storageRoot`. + */ + storageRootCredentialName?: pulumi.Input; + /** + * Timestamp (in milliseconds) when the current metastore was updated. + */ + updatedAt?: pulumi.Input; + /** + * the ID of the identity that updated the current metastore. + */ + updatedBy?: pulumi.Input; +} + export interface GetInstancePoolPoolInfo { awsAttributes?: inputs.GetInstancePoolPoolInfoAwsAttributes; azureAttributes?: inputs.GetInstancePoolPoolInfoAzureAttributes; @@ -3060,7 +3204,7 @@ export interface GetMetastoreMetastoreInfo { privilegeModelVersion?: string; region?: string; /** - * Path on cloud storage account, where managed `databricks.Table` are stored. 
Change forces creation of a new resource. + * Path on cloud storage account, where managed `databricks.Table` are stored. */ storageRoot?: string; storageRootCredentialId?: string; @@ -3102,7 +3246,7 @@ export interface GetMetastoreMetastoreInfoArgs { privilegeModelVersion?: pulumi.Input; region?: pulumi.Input; /** - * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * Path on cloud storage account, where managed `databricks.Table` are stored. */ storageRoot?: pulumi.Input; storageRootCredentialId?: pulumi.Input; @@ -3262,6 +3406,7 @@ export interface GetShareObjectPartitionValueArgs { } export interface GetSqlWarehouseChannel { + dbsqlVersion?: string; /** * Name of the SQL warehouse to search (case-sensitive). */ @@ -3269,44 +3414,71 @@ export interface GetSqlWarehouseChannel { } export interface GetSqlWarehouseChannelArgs { + dbsqlVersion?: pulumi.Input; /** * Name of the SQL warehouse to search (case-sensitive). */ name?: pulumi.Input; } +export interface GetSqlWarehouseHealth { + details?: string; + failureReason?: inputs.GetSqlWarehouseHealthFailureReason; + message?: string; + status?: string; + summary?: string; +} + +export interface GetSqlWarehouseHealthArgs { + details?: pulumi.Input; + failureReason?: pulumi.Input; + message?: pulumi.Input; + status?: pulumi.Input; + summary?: pulumi.Input; +} + +export interface GetSqlWarehouseHealthFailureReason { + code?: string; + parameters?: {[key: string]: any}; + type?: string; +} + +export interface GetSqlWarehouseHealthFailureReasonArgs { + code?: pulumi.Input; + parameters?: pulumi.Input<{[key: string]: any}>; + type?: pulumi.Input; +} + export interface GetSqlWarehouseOdbcParams { - host?: string; hostname?: string; - path: string; - port: number; - protocol: string; + path?: string; + port?: number; + protocol?: string; } export interface GetSqlWarehouseOdbcParamsArgs { - host?: pulumi.Input; hostname?: pulumi.Input; - path: 
pulumi.Input; - port: pulumi.Input; - protocol: pulumi.Input; + path?: pulumi.Input; + port?: pulumi.Input; + protocol?: pulumi.Input; } export interface GetSqlWarehouseTags { - customTags: inputs.GetSqlWarehouseTagsCustomTag[]; + customTags?: inputs.GetSqlWarehouseTagsCustomTag[]; } export interface GetSqlWarehouseTagsArgs { - customTags: pulumi.Input[]>; + customTags?: pulumi.Input[]>; } export interface GetSqlWarehouseTagsCustomTag { - key: string; - value: string; + key?: string; + value?: string; } export interface GetSqlWarehouseTagsCustomTagArgs { - key: pulumi.Input; - value: pulumi.Input; + key?: pulumi.Input; + value?: pulumi.Input; } export interface GrantsGrant { @@ -5628,22 +5800,36 @@ export interface SqlAlertOptions { } export interface SqlEndpointChannel { + dbsqlVersion?: pulumi.Input; /** * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. */ name?: pulumi.Input; } +export interface SqlEndpointHealth { + details?: pulumi.Input; + failureReason?: pulumi.Input; + message?: pulumi.Input; + status?: pulumi.Input; + summary?: pulumi.Input; +} + +export interface SqlEndpointHealthFailureReason { + code?: pulumi.Input; + parameters?: pulumi.Input<{[key: string]: any}>; + type?: pulumi.Input; +} + export interface SqlEndpointOdbcParams { - host?: pulumi.Input; hostname?: pulumi.Input; - path: pulumi.Input; - port: pulumi.Input; - protocol: pulumi.Input; + path?: pulumi.Input; + port?: pulumi.Input; + protocol?: pulumi.Input; } export interface SqlEndpointTags { - customTags: pulumi.Input[]>; + customTags?: pulumi.Input[]>; } export interface SqlEndpointTagsCustomTag { diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index 370be10b..2e4cef3c 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -702,6 +702,78 @@ export interface GetClusterClusterInfoTerminationReason { type?: string; } +export interface 
GetCurrentMetastoreMetastoreInfo { + cloud?: string; + /** + * Timestamp (in milliseconds) when the current metastore was created. + */ + createdAt?: number; + /** + * the ID of the identity that created the current metastore. + */ + createdBy?: string; + /** + * the ID of the default data access configuration. + */ + defaultDataAccessConfigId?: string; + /** + * The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + */ + deltaSharingOrganizationName?: string; + /** + * the expiration duration in seconds on recipient data access tokens. + */ + deltaSharingRecipientTokenLifetimeInSeconds?: number; + /** + * Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + */ + deltaSharingScope?: string; + /** + * Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + */ + globalMetastoreId?: string; + /** + * Metastore ID. + */ + metastoreId?: string; + /** + * Name of metastore. + */ + name?: string; + /** + * Username/group name/sp applicationId of the metastore owner. + */ + owner?: string; + /** + * the version of the privilege model used by the metastore. + */ + privilegeModelVersion?: string; + /** + * (Mandatory for account-level) The region of the metastore. + */ + region?: string; + /** + * Path on cloud storage account, where managed `databricks.Table` are stored. + */ + storageRoot?: string; + /** + * ID of a storage credential used for the `storageRoot`. + */ + storageRootCredentialId?: string; + /** + * Name of a storage credential used for the `storageRoot`. + */ + storageRootCredentialName?: string; + /** + * Timestamp (in milliseconds) when the current metastore was updated. + */ + updatedAt?: number; + /** + * the ID of the identity that updated the current metastore. 
+ */ + updatedBy?: string; +} + export interface GetDbfsFilePathsPathList { fileSize?: number; /** @@ -1784,7 +1856,7 @@ export interface GetMetastoreMetastoreInfo { privilegeModelVersion?: string; region?: string; /** - * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. + * Path on cloud storage account, where managed `databricks.Table` are stored. */ storageRoot?: string; storageRootCredentialId?: string; @@ -1877,27 +1949,41 @@ export interface GetShareObjectPartitionValue { } export interface GetSqlWarehouseChannel { + dbsqlVersion?: string; /** * Name of the SQL warehouse to search (case-sensitive). */ name?: string; } +export interface GetSqlWarehouseHealth { + details?: string; + failureReason?: outputs.GetSqlWarehouseHealthFailureReason; + message?: string; + status?: string; + summary?: string; +} + +export interface GetSqlWarehouseHealthFailureReason { + code?: string; + parameters?: {[key: string]: any}; + type?: string; +} + export interface GetSqlWarehouseOdbcParams { - host?: string; hostname?: string; - path: string; - port: number; - protocol: string; + path?: string; + port?: number; + protocol?: string; } export interface GetSqlWarehouseTags { - customTags: outputs.GetSqlWarehouseTagsCustomTag[]; + customTags?: outputs.GetSqlWarehouseTagsCustomTag[]; } export interface GetSqlWarehouseTagsCustomTag { - key: string; - value: string; + key?: string; + value?: string; } export interface GrantsGrant { @@ -4219,22 +4305,36 @@ export interface SqlAlertOptions { } export interface SqlEndpointChannel { + dbsqlVersion?: string; /** * Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. 
*/ name?: string; } +export interface SqlEndpointHealth { + details?: string; + failureReason?: outputs.SqlEndpointHealthFailureReason; + message?: string; + status?: string; + summary?: string; +} + +export interface SqlEndpointHealthFailureReason { + code?: string; + parameters?: {[key: string]: any}; + type?: string; +} + export interface SqlEndpointOdbcParams { - host?: string; hostname?: string; - path: string; - port: number; - protocol: string; + path?: string; + port?: number; + protocol?: string; } export interface SqlEndpointTags { - customTags: outputs.SqlEndpointTagsCustomTag[]; + customTags?: outputs.SqlEndpointTagsCustomTag[]; } export interface SqlEndpointTagsCustomTag { diff --git a/sdk/nodejs/volume.ts b/sdk/nodejs/volume.ts index 608c7ea4..5ef1396b 100644 --- a/sdk/nodejs/volume.ts +++ b/sdk/nodejs/volume.ts @@ -7,6 +7,8 @@ import * as utilities from "./utilities"; /** * > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). * + * > **Note** This resource could be only used with workspace-level provider! + * * Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. * * A volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog. 
diff --git a/sdk/python/pulumi_databricks/__init__.py b/sdk/python/pulumi_databricks/__init__.py index 5e4c84ee..2a009370 100644 --- a/sdk/python/pulumi_databricks/__init__.py +++ b/sdk/python/pulumi_databricks/__init__.py @@ -25,6 +25,7 @@ from .get_cluster_policy import * from .get_clusters import * from .get_current_config import * +from .get_current_metastore import * from .get_current_user import * from .get_dbfs_file import * from .get_dbfs_file_paths import * @@ -57,6 +58,7 @@ from .get_zones import * from .git_credential import * from .global_init_script import * +from .grant import * from .grants import * from .group import * from .group_instance_profile import * @@ -246,6 +248,14 @@ "databricks:index/globalInitScript:GlobalInitScript": "GlobalInitScript" } }, + { + "pkg": "databricks", + "mod": "index/grant", + "fqn": "pulumi_databricks", + "classes": { + "databricks:index/grant:Grant": "Grant" + } + }, { "pkg": "databricks", "mod": "index/grants", diff --git a/sdk/python/pulumi_databricks/_inputs.py b/sdk/python/pulumi_databricks/_inputs.py index 55a48f81..747d918b 100644 --- a/sdk/python/pulumi_databricks/_inputs.py +++ b/sdk/python/pulumi_databricks/_inputs.py @@ -254,6 +254,8 @@ 'ShareObjectPartitionValueArgs', 'SqlAlertOptionsArgs', 'SqlEndpointChannelArgs', + 'SqlEndpointHealthArgs', + 'SqlEndpointHealthFailureReasonArgs', 'SqlEndpointOdbcParamsArgs', 'SqlEndpointTagsArgs', 'SqlEndpointTagsCustomTagArgs', @@ -311,6 +313,7 @@ 'GetClusterClusterInfoInitScriptVolumesArgs', 'GetClusterClusterInfoInitScriptWorkspaceArgs', 'GetClusterClusterInfoTerminationReasonArgs', + 'GetCurrentMetastoreMetastoreInfoArgs', 'GetInstancePoolPoolInfoArgs', 'GetInstancePoolPoolInfoAwsAttributesArgs', 'GetInstancePoolPoolInfoAzureAttributesArgs', @@ -466,6 +469,8 @@ 'GetShareObjectPartitionArgs', 'GetShareObjectPartitionValueArgs', 'GetSqlWarehouseChannelArgs', + 'GetSqlWarehouseHealthArgs', + 'GetSqlWarehouseHealthFailureReasonArgs', 'GetSqlWarehouseOdbcParamsArgs', 
'GetSqlWarehouseTagsArgs', 'GetSqlWarehouseTagsCustomTagArgs', @@ -13405,13 +13410,25 @@ def muted(self, value: Optional[pulumi.Input[bool]]): @pulumi.input_type class SqlEndpointChannelArgs: def __init__(__self__, *, + dbsql_version: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. """ + if dbsql_version is not None: + pulumi.set(__self__, "dbsql_version", dbsql_version) if name is not None: pulumi.set(__self__, "name", name) + @property + @pulumi.getter(name="dbsqlVersion") + def dbsql_version(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "dbsql_version") + + @dbsql_version.setter + def dbsql_version(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "dbsql_version", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -13426,56 +13443,126 @@ def name(self, value: Optional[pulumi.Input[str]]): @pulumi.input_type -class SqlEndpointOdbcParamsArgs: +class SqlEndpointHealthArgs: def __init__(__self__, *, - path: pulumi.Input[str], - port: pulumi.Input[int], - protocol: pulumi.Input[str], - host: Optional[pulumi.Input[str]] = None, - hostname: Optional[pulumi.Input[str]] = None): - pulumi.set(__self__, "path", path) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "protocol", protocol) - if host is not None: - pulumi.set(__self__, "host", host) - if hostname is not None: - pulumi.set(__self__, "hostname", hostname) + details: Optional[pulumi.Input[str]] = None, + failure_reason: Optional[pulumi.Input['SqlEndpointHealthFailureReasonArgs']] = None, + message: Optional[pulumi.Input[str]] = None, + status: Optional[pulumi.Input[str]] = None, + summary: Optional[pulumi.Input[str]] = None): + if details is not None: + pulumi.set(__self__, "details", details) + if failure_reason is not None: + 
pulumi.set(__self__, "failure_reason", failure_reason) + if message is not None: + pulumi.set(__self__, "message", message) + if status is not None: + pulumi.set(__self__, "status", status) + if summary is not None: + pulumi.set(__self__, "summary", summary) @property @pulumi.getter - def path(self) -> pulumi.Input[str]: - return pulumi.get(self, "path") + def details(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "details") - @path.setter - def path(self, value: pulumi.Input[str]): - pulumi.set(self, "path", value) + @details.setter + def details(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "details", value) + + @property + @pulumi.getter(name="failureReason") + def failure_reason(self) -> Optional[pulumi.Input['SqlEndpointHealthFailureReasonArgs']]: + return pulumi.get(self, "failure_reason") + + @failure_reason.setter + def failure_reason(self, value: Optional[pulumi.Input['SqlEndpointHealthFailureReasonArgs']]): + pulumi.set(self, "failure_reason", value) @property @pulumi.getter - def port(self) -> pulumi.Input[int]: - return pulumi.get(self, "port") + def message(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "message") - @port.setter - def port(self, value: pulumi.Input[int]): - pulumi.set(self, "port", value) + @message.setter + def message(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "message", value) @property @pulumi.getter - def protocol(self) -> pulumi.Input[str]: - return pulumi.get(self, "protocol") + def status(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "status") - @protocol.setter - def protocol(self, value: pulumi.Input[str]): - pulumi.set(self, "protocol", value) + @status.setter + def status(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "status", value) @property @pulumi.getter - def host(self) -> Optional[pulumi.Input[str]]: - return pulumi.get(self, "host") + def summary(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, 
"summary") + + @summary.setter + def summary(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "summary", value) - @host.setter - def host(self, value: Optional[pulumi.Input[str]]): - pulumi.set(self, "host", value) + +@pulumi.input_type +class SqlEndpointHealthFailureReasonArgs: + def __init__(__self__, *, + code: Optional[pulumi.Input[str]] = None, + parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None, + type: Optional[pulumi.Input[str]] = None): + if code is not None: + pulumi.set(__self__, "code", code) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) + if type is not None: + pulumi.set(__self__, "type", type) + + @property + @pulumi.getter + def code(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "code") + + @code.setter + def code(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "code", value) + + @property + @pulumi.getter + def parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + return pulumi.get(self, "parameters") + + @parameters.setter + def parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): + pulumi.set(self, "parameters", value) + + @property + @pulumi.getter + def type(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "type") + + @type.setter + def type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "type", value) + + +@pulumi.input_type +class SqlEndpointOdbcParamsArgs: + def __init__(__self__, *, + hostname: Optional[pulumi.Input[str]] = None, + path: Optional[pulumi.Input[str]] = None, + port: Optional[pulumi.Input[int]] = None, + protocol: Optional[pulumi.Input[str]] = None): + if hostname is not None: + pulumi.set(__self__, "hostname", hostname) + if path is not None: + pulumi.set(__self__, "path", path) + if port is not None: + pulumi.set(__self__, "port", port) + if protocol is not None: + pulumi.set(__self__, "protocol", protocol) @property @pulumi.getter @@ -13486,20 +13573,48 @@ def hostname(self) -> 
Optional[pulumi.Input[str]]: def hostname(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "hostname", value) + @property + @pulumi.getter + def path(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "path") + + @path.setter + def path(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "path", value) + + @property + @pulumi.getter + def port(self) -> Optional[pulumi.Input[int]]: + return pulumi.get(self, "port") + + @port.setter + def port(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "port", value) + + @property + @pulumi.getter + def protocol(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "protocol") + + @protocol.setter + def protocol(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "protocol", value) + @pulumi.input_type class SqlEndpointTagsArgs: def __init__(__self__, *, - custom_tags: pulumi.Input[Sequence[pulumi.Input['SqlEndpointTagsCustomTagArgs']]]): - pulumi.set(__self__, "custom_tags", custom_tags) + custom_tags: Optional[pulumi.Input[Sequence[pulumi.Input['SqlEndpointTagsCustomTagArgs']]]] = None): + if custom_tags is not None: + pulumi.set(__self__, "custom_tags", custom_tags) @property @pulumi.getter(name="customTags") - def custom_tags(self) -> pulumi.Input[Sequence[pulumi.Input['SqlEndpointTagsCustomTagArgs']]]: + def custom_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SqlEndpointTagsCustomTagArgs']]]]: return pulumi.get(self, "custom_tags") @custom_tags.setter - def custom_tags(self, value: pulumi.Input[Sequence[pulumi.Input['SqlEndpointTagsCustomTagArgs']]]): + def custom_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SqlEndpointTagsCustomTagArgs']]]]): pulumi.set(self, "custom_tags", value) @@ -16568,6 +16683,297 @@ def type(self, value: Optional[str]): pulumi.set(self, "type", value) +@pulumi.input_type +class GetCurrentMetastoreMetastoreInfoArgs: + def __init__(__self__, *, + cloud: Optional[str] = None, + created_at: Optional[int] 
= None, + created_by: Optional[str] = None, + default_data_access_config_id: Optional[str] = None, + delta_sharing_organization_name: Optional[str] = None, + delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None, + delta_sharing_scope: Optional[str] = None, + global_metastore_id: Optional[str] = None, + metastore_id: Optional[str] = None, + name: Optional[str] = None, + owner: Optional[str] = None, + privilege_model_version: Optional[str] = None, + region: Optional[str] = None, + storage_root: Optional[str] = None, + storage_root_credential_id: Optional[str] = None, + storage_root_credential_name: Optional[str] = None, + updated_at: Optional[int] = None, + updated_by: Optional[str] = None): + """ + :param int created_at: Timestamp (in milliseconds) when the current metastore was created. + :param str created_by: the ID of the identity that created the current metastore. + :param str default_data_access_config_id: the ID of the default data access configuration. + :param str delta_sharing_organization_name: The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + :param int delta_sharing_recipient_token_lifetime_in_seconds: the expiration duration in seconds on recipient data access tokens. + :param str delta_sharing_scope: Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + :param str global_metastore_id: Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + :param str metastore_id: Metastore ID. + :param str name: Name of metastore. + :param str owner: Username/group name/sp application_id of the metastore owner. + :param str privilege_model_version: the version of the privilege model used by the metastore. + :param str region: (Mandatory for account-level) The region of the metastore. + :param str storage_root: Path on cloud storage account, where managed `Table` are stored. 
+ :param str storage_root_credential_id: ID of a storage credential used for the `storage_root`. + :param str storage_root_credential_name: Name of a storage credential used for the `storage_root`. + :param int updated_at: Timestamp (in milliseconds) when the current metastore was updated. + :param str updated_by: the ID of the identity that updated the current metastore. + """ + if cloud is not None: + pulumi.set(__self__, "cloud", cloud) + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) + if created_by is not None: + pulumi.set(__self__, "created_by", created_by) + if default_data_access_config_id is not None: + pulumi.set(__self__, "default_data_access_config_id", default_data_access_config_id) + if delta_sharing_organization_name is not None: + pulumi.set(__self__, "delta_sharing_organization_name", delta_sharing_organization_name) + if delta_sharing_recipient_token_lifetime_in_seconds is not None: + pulumi.set(__self__, "delta_sharing_recipient_token_lifetime_in_seconds", delta_sharing_recipient_token_lifetime_in_seconds) + if delta_sharing_scope is not None: + pulumi.set(__self__, "delta_sharing_scope", delta_sharing_scope) + if global_metastore_id is not None: + pulumi.set(__self__, "global_metastore_id", global_metastore_id) + if metastore_id is not None: + pulumi.set(__self__, "metastore_id", metastore_id) + if name is not None: + pulumi.set(__self__, "name", name) + if owner is not None: + pulumi.set(__self__, "owner", owner) + if privilege_model_version is not None: + pulumi.set(__self__, "privilege_model_version", privilege_model_version) + if region is not None: + pulumi.set(__self__, "region", region) + if storage_root is not None: + pulumi.set(__self__, "storage_root", storage_root) + if storage_root_credential_id is not None: + pulumi.set(__self__, "storage_root_credential_id", storage_root_credential_id) + if storage_root_credential_name is not None: + pulumi.set(__self__, "storage_root_credential_name", 
storage_root_credential_name) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) + if updated_by is not None: + pulumi.set(__self__, "updated_by", updated_by) + + @property + @pulumi.getter + def cloud(self) -> Optional[str]: + return pulumi.get(self, "cloud") + + @cloud.setter + def cloud(self, value: Optional[str]): + pulumi.set(self, "cloud", value) + + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[int]: + """ + Timestamp (in milliseconds) when the current metastore was created. + """ + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[int]): + pulumi.set(self, "created_at", value) + + @property + @pulumi.getter(name="createdBy") + def created_by(self) -> Optional[str]: + """ + the ID of the identity that created the current metastore. + """ + return pulumi.get(self, "created_by") + + @created_by.setter + def created_by(self, value: Optional[str]): + pulumi.set(self, "created_by", value) + + @property + @pulumi.getter(name="defaultDataAccessConfigId") + def default_data_access_config_id(self) -> Optional[str]: + """ + the ID of the default data access configuration. + """ + return pulumi.get(self, "default_data_access_config_id") + + @default_data_access_config_id.setter + def default_data_access_config_id(self, value: Optional[str]): + pulumi.set(self, "default_data_access_config_id", value) + + @property + @pulumi.getter(name="deltaSharingOrganizationName") + def delta_sharing_organization_name(self) -> Optional[str]: + """ + The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. 
+ """ + return pulumi.get(self, "delta_sharing_organization_name") + + @delta_sharing_organization_name.setter + def delta_sharing_organization_name(self, value: Optional[str]): + pulumi.set(self, "delta_sharing_organization_name", value) + + @property + @pulumi.getter(name="deltaSharingRecipientTokenLifetimeInSeconds") + def delta_sharing_recipient_token_lifetime_in_seconds(self) -> Optional[int]: + """ + the expiration duration in seconds on recipient data access tokens. + """ + return pulumi.get(self, "delta_sharing_recipient_token_lifetime_in_seconds") + + @delta_sharing_recipient_token_lifetime_in_seconds.setter + def delta_sharing_recipient_token_lifetime_in_seconds(self, value: Optional[int]): + pulumi.set(self, "delta_sharing_recipient_token_lifetime_in_seconds", value) + + @property + @pulumi.getter(name="deltaSharingScope") + def delta_sharing_scope(self) -> Optional[str]: + """ + Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + """ + return pulumi.get(self, "delta_sharing_scope") + + @delta_sharing_scope.setter + def delta_sharing_scope(self, value: Optional[str]): + pulumi.set(self, "delta_sharing_scope", value) + + @property + @pulumi.getter(name="globalMetastoreId") + def global_metastore_id(self) -> Optional[str]: + """ + Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + """ + return pulumi.get(self, "global_metastore_id") + + @global_metastore_id.setter + def global_metastore_id(self, value: Optional[str]): + pulumi.set(self, "global_metastore_id", value) + + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> Optional[str]: + """ + Metastore ID. + """ + return pulumi.get(self, "metastore_id") + + @metastore_id.setter + def metastore_id(self, value: Optional[str]): + pulumi.set(self, "metastore_id", value) + + @property + @pulumi.getter + def name(self) -> Optional[str]: + """ + Name of metastore. 
+ """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[str]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter + def owner(self) -> Optional[str]: + """ + Username/group name/sp application_id of the metastore owner. + """ + return pulumi.get(self, "owner") + + @owner.setter + def owner(self, value: Optional[str]): + pulumi.set(self, "owner", value) + + @property + @pulumi.getter(name="privilegeModelVersion") + def privilege_model_version(self) -> Optional[str]: + """ + the version of the privilege model used by the metastore. + """ + return pulumi.get(self, "privilege_model_version") + + @privilege_model_version.setter + def privilege_model_version(self, value: Optional[str]): + pulumi.set(self, "privilege_model_version", value) + + @property + @pulumi.getter + def region(self) -> Optional[str]: + """ + (Mandatory for account-level) The region of the metastore. + """ + return pulumi.get(self, "region") + + @region.setter + def region(self, value: Optional[str]): + pulumi.set(self, "region", value) + + @property + @pulumi.getter(name="storageRoot") + def storage_root(self) -> Optional[str]: + """ + Path on cloud storage account, where managed `Table` are stored. + """ + return pulumi.get(self, "storage_root") + + @storage_root.setter + def storage_root(self, value: Optional[str]): + pulumi.set(self, "storage_root", value) + + @property + @pulumi.getter(name="storageRootCredentialId") + def storage_root_credential_id(self) -> Optional[str]: + """ + ID of a storage credential used for the `storage_root`. + """ + return pulumi.get(self, "storage_root_credential_id") + + @storage_root_credential_id.setter + def storage_root_credential_id(self, value: Optional[str]): + pulumi.set(self, "storage_root_credential_id", value) + + @property + @pulumi.getter(name="storageRootCredentialName") + def storage_root_credential_name(self) -> Optional[str]: + """ + Name of a storage credential used for the `storage_root`. 
+ """ + return pulumi.get(self, "storage_root_credential_name") + + @storage_root_credential_name.setter + def storage_root_credential_name(self, value: Optional[str]): + pulumi.set(self, "storage_root_credential_name", value) + + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[int]: + """ + Timestamp (in milliseconds) when the current metastore was updated. + """ + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[int]): + pulumi.set(self, "updated_at", value) + + @property + @pulumi.getter(name="updatedBy") + def updated_by(self) -> Optional[str]: + """ + the ID of the identity that updated the current metastore. + """ + return pulumi.get(self, "updated_by") + + @updated_by.setter + def updated_by(self, value: Optional[str]): + pulumi.set(self, "updated_by", value) + + @pulumi.input_type class GetInstancePoolPoolInfoArgs: def __init__(__self__, *, @@ -23997,7 +24403,7 @@ def __init__(__self__, *, :param str metastore_id: Id of the metastore to be fetched :param str name: Name of metastore. :param str owner: Username/groupname/sp application_id of the metastore owner. - :param str storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. + :param str storage_root: Path on cloud storage account, where managed `Table` are stored. """ if cloud is not None: pulumi.set(__self__, "cloud", cloud) @@ -24175,7 +24581,7 @@ def region(self, value: Optional[str]): @pulumi.getter(name="storageRoot") def storage_root(self) -> Optional[str]: """ - Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. + Path on cloud storage account, where managed `Table` are stored. 
""" return pulumi.get(self, "storage_root") @@ -24679,13 +25085,25 @@ def value(self, value: Optional[str]): @pulumi.input_type class GetSqlWarehouseChannelArgs: def __init__(__self__, *, + dbsql_version: Optional[str] = None, name: Optional[str] = None): """ :param str name: Name of the SQL warehouse to search (case-sensitive). """ + if dbsql_version is not None: + pulumi.set(__self__, "dbsql_version", dbsql_version) if name is not None: pulumi.set(__self__, "name", name) + @property + @pulumi.getter(name="dbsqlVersion") + def dbsql_version(self) -> Optional[str]: + return pulumi.get(self, "dbsql_version") + + @dbsql_version.setter + def dbsql_version(self, value: Optional[str]): + pulumi.set(self, "dbsql_version", value) + @property @pulumi.getter def name(self) -> Optional[str]: @@ -24700,56 +25118,126 @@ def name(self, value: Optional[str]): @pulumi.input_type -class GetSqlWarehouseOdbcParamsArgs: +class GetSqlWarehouseHealthArgs: def __init__(__self__, *, - path: str, - port: int, - protocol: str, - host: Optional[str] = None, - hostname: Optional[str] = None): - pulumi.set(__self__, "path", path) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "protocol", protocol) - if host is not None: - pulumi.set(__self__, "host", host) - if hostname is not None: - pulumi.set(__self__, "hostname", hostname) + details: Optional[str] = None, + failure_reason: Optional['GetSqlWarehouseHealthFailureReasonArgs'] = None, + message: Optional[str] = None, + status: Optional[str] = None, + summary: Optional[str] = None): + if details is not None: + pulumi.set(__self__, "details", details) + if failure_reason is not None: + pulumi.set(__self__, "failure_reason", failure_reason) + if message is not None: + pulumi.set(__self__, "message", message) + if status is not None: + pulumi.set(__self__, "status", status) + if summary is not None: + pulumi.set(__self__, "summary", summary) @property @pulumi.getter - def path(self) -> str: - return pulumi.get(self, "path") + def 
details(self) -> Optional[str]: + return pulumi.get(self, "details") - @path.setter - def path(self, value: str): - pulumi.set(self, "path", value) + @details.setter + def details(self, value: Optional[str]): + pulumi.set(self, "details", value) + + @property + @pulumi.getter(name="failureReason") + def failure_reason(self) -> Optional['GetSqlWarehouseHealthFailureReasonArgs']: + return pulumi.get(self, "failure_reason") + + @failure_reason.setter + def failure_reason(self, value: Optional['GetSqlWarehouseHealthFailureReasonArgs']): + pulumi.set(self, "failure_reason", value) @property @pulumi.getter - def port(self) -> int: - return pulumi.get(self, "port") + def message(self) -> Optional[str]: + return pulumi.get(self, "message") - @port.setter - def port(self, value: int): - pulumi.set(self, "port", value) + @message.setter + def message(self, value: Optional[str]): + pulumi.set(self, "message", value) @property @pulumi.getter - def protocol(self) -> str: - return pulumi.get(self, "protocol") + def status(self) -> Optional[str]: + return pulumi.get(self, "status") - @protocol.setter - def protocol(self, value: str): - pulumi.set(self, "protocol", value) + @status.setter + def status(self, value: Optional[str]): + pulumi.set(self, "status", value) @property @pulumi.getter - def host(self) -> Optional[str]: - return pulumi.get(self, "host") + def summary(self) -> Optional[str]: + return pulumi.get(self, "summary") + + @summary.setter + def summary(self, value: Optional[str]): + pulumi.set(self, "summary", value) - @host.setter - def host(self, value: Optional[str]): - pulumi.set(self, "host", value) + +@pulumi.input_type +class GetSqlWarehouseHealthFailureReasonArgs: + def __init__(__self__, *, + code: Optional[str] = None, + parameters: Optional[Mapping[str, Any]] = None, + type: Optional[str] = None): + if code is not None: + pulumi.set(__self__, "code", code) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) + if type is not None: + 
pulumi.set(__self__, "type", type) + + @property + @pulumi.getter + def code(self) -> Optional[str]: + return pulumi.get(self, "code") + + @code.setter + def code(self, value: Optional[str]): + pulumi.set(self, "code", value) + + @property + @pulumi.getter + def parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "parameters") + + @parameters.setter + def parameters(self, value: Optional[Mapping[str, Any]]): + pulumi.set(self, "parameters", value) + + @property + @pulumi.getter + def type(self) -> Optional[str]: + return pulumi.get(self, "type") + + @type.setter + def type(self, value: Optional[str]): + pulumi.set(self, "type", value) + + +@pulumi.input_type +class GetSqlWarehouseOdbcParamsArgs: + def __init__(__self__, *, + hostname: Optional[str] = None, + path: Optional[str] = None, + port: Optional[int] = None, + protocol: Optional[str] = None): + if hostname is not None: + pulumi.set(__self__, "hostname", hostname) + if path is not None: + pulumi.set(__self__, "path", path) + if port is not None: + pulumi.set(__self__, "port", port) + if protocol is not None: + pulumi.set(__self__, "protocol", protocol) @property @pulumi.getter @@ -24760,47 +25248,77 @@ def hostname(self) -> Optional[str]: def hostname(self, value: Optional[str]): pulumi.set(self, "hostname", value) + @property + @pulumi.getter + def path(self) -> Optional[str]: + return pulumi.get(self, "path") + + @path.setter + def path(self, value: Optional[str]): + pulumi.set(self, "path", value) + + @property + @pulumi.getter + def port(self) -> Optional[int]: + return pulumi.get(self, "port") + + @port.setter + def port(self, value: Optional[int]): + pulumi.set(self, "port", value) + + @property + @pulumi.getter + def protocol(self) -> Optional[str]: + return pulumi.get(self, "protocol") + + @protocol.setter + def protocol(self, value: Optional[str]): + pulumi.set(self, "protocol", value) + @pulumi.input_type class GetSqlWarehouseTagsArgs: def __init__(__self__, *, - custom_tags: 
Sequence['GetSqlWarehouseTagsCustomTagArgs']): - pulumi.set(__self__, "custom_tags", custom_tags) + custom_tags: Optional[Sequence['GetSqlWarehouseTagsCustomTagArgs']] = None): + if custom_tags is not None: + pulumi.set(__self__, "custom_tags", custom_tags) @property @pulumi.getter(name="customTags") - def custom_tags(self) -> Sequence['GetSqlWarehouseTagsCustomTagArgs']: + def custom_tags(self) -> Optional[Sequence['GetSqlWarehouseTagsCustomTagArgs']]: return pulumi.get(self, "custom_tags") @custom_tags.setter - def custom_tags(self, value: Sequence['GetSqlWarehouseTagsCustomTagArgs']): + def custom_tags(self, value: Optional[Sequence['GetSqlWarehouseTagsCustomTagArgs']]): pulumi.set(self, "custom_tags", value) @pulumi.input_type class GetSqlWarehouseTagsCustomTagArgs: def __init__(__self__, *, - key: str, - value: str): - pulumi.set(__self__, "key", key) - pulumi.set(__self__, "value", value) + key: Optional[str] = None, + value: Optional[str] = None): + if key is not None: + pulumi.set(__self__, "key", key) + if value is not None: + pulumi.set(__self__, "value", value) @property @pulumi.getter - def key(self) -> str: + def key(self) -> Optional[str]: return pulumi.get(self, "key") @key.setter - def key(self, value: str): + def key(self, value: Optional[str]): pulumi.set(self, "key", value) @property @pulumi.getter - def value(self) -> str: + def value(self) -> Optional[str]: return pulumi.get(self, "value") @value.setter - def value(self, value: str): + def value(self, value: Optional[str]): pulumi.set(self, "value", value) diff --git a/sdk/python/pulumi_databricks/access_control_rule_set.py b/sdk/python/pulumi_databricks/access_control_rule_set.py index 9d91571f..9f849a8f 100644 --- a/sdk/python/pulumi_databricks/access_control_rule_set.py +++ b/sdk/python/pulumi_databricks/access_control_rule_set.py @@ -134,6 +134,8 @@ def __init__(__self__, name: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be used with account or 
workspace-level provider. + This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. > **Note** Currently, we only support managing access rules on service principal, group and account resources through `AccessControlRuleSet`. @@ -277,6 +279,8 @@ def __init__(__self__, args: Optional[AccessControlRuleSetArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be used with account or workspace-level provider. + This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. > **Note** Currently, we only support managing access rules on service principal, group and account resources through `AccessControlRuleSet`. diff --git a/sdk/python/pulumi_databricks/connection.py b/sdk/python/pulumi_databricks/connection.py index 7178cdae..39d62b9a 100644 --- a/sdk/python/pulumi_databricks/connection.py +++ b/sdk/python/pulumi_databricks/connection.py @@ -280,6 +280,8 @@ def __init__(__self__, read_only: Optional[pulumi.Input[bool]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. @@ -366,6 +368,8 @@ def __init__(__self__, args: ConnectionArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! + Lakehouse Federation is the query federation platform for Databricks. 
Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. diff --git a/sdk/python/pulumi_databricks/default_namespace_setting.py b/sdk/python/pulumi_databricks/default_namespace_setting.py index 43dc8af9..6d4cb09c 100644 --- a/sdk/python/pulumi_databricks/default_namespace_setting.py +++ b/sdk/python/pulumi_databricks/default_namespace_setting.py @@ -118,6 +118,8 @@ def __init__(__self__, setting_name: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + The `DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference a fully qualified 3 level name. For example, if the default catalog is set to 'retail_prod' then a query @@ -146,6 +148,8 @@ def __init__(__self__, args: DefaultNamespaceSettingArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! + The `DefaultNamespaceSetting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference a fully qualified 3 level name. 
For example, if the default catalog is set to 'retail_prod' then a query diff --git a/sdk/python/pulumi_databricks/directory.py b/sdk/python/pulumi_databricks/directory.py index 315fe39a..a0ae5861 100644 --- a/sdk/python/pulumi_databricks/directory.py +++ b/sdk/python/pulumi_databricks/directory.py @@ -67,11 +67,13 @@ class _DirectoryState: def __init__(__self__, *, delete_recursive: Optional[pulumi.Input[bool]] = None, object_id: Optional[pulumi.Input[int]] = None, - path: Optional[pulumi.Input[str]] = None): + path: Optional[pulumi.Input[str]] = None, + workspace_path: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Directory resources. :param pulumi.Input[int] object_id: Unique identifier for a DIRECTORY :param pulumi.Input[str] path: The absolute path of the directory, beginning with "/", e.g. "/Demo". + :param pulumi.Input[str] workspace_path: path on Workspace File System (WSFS) in form of `/Workspace` + `path` """ if delete_recursive is not None: pulumi.set(__self__, "delete_recursive", delete_recursive) @@ -79,6 +81,8 @@ def __init__(__self__, *, pulumi.set(__self__, "object_id", object_id) if path is not None: pulumi.set(__self__, "path", path) + if workspace_path is not None: + pulumi.set(__self__, "workspace_path", workspace_path) @property @pulumi.getter(name="deleteRecursive") @@ -113,6 +117,18 @@ def path(self) -> Optional[pulumi.Input[str]]: def path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path", value) + @property + @pulumi.getter(name="workspacePath") + def workspace_path(self) -> Optional[pulumi.Input[str]]: + """ + path on Workspace File System (WSFS) in form of `/Workspace` + `path` + """ + return pulumi.get(self, "workspace_path") + + @workspace_path.setter + def workspace_path(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "workspace_path", value) + class Directory(pulumi.CustomResource): @overload @@ -184,6 +200,7 @@ def _internal_init(__self__, if path is None 
and not opts.urn: raise TypeError("Missing required property 'path'") __props__.__dict__["path"] = path + __props__.__dict__["workspace_path"] = None super(Directory, __self__).__init__( 'databricks:index/directory:Directory', resource_name, @@ -196,7 +213,8 @@ def get(resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, delete_recursive: Optional[pulumi.Input[bool]] = None, object_id: Optional[pulumi.Input[int]] = None, - path: Optional[pulumi.Input[str]] = None) -> 'Directory': + path: Optional[pulumi.Input[str]] = None, + workspace_path: Optional[pulumi.Input[str]] = None) -> 'Directory': """ Get an existing Directory resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -206,6 +224,7 @@ def get(resource_name: str, :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] object_id: Unique identifier for a DIRECTORY :param pulumi.Input[str] path: The absolute path of the directory, beginning with "/", e.g. "/Demo". 
+ :param pulumi.Input[str] workspace_path: path on Workspace File System (WSFS) in form of `/Workspace` + `path` """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -214,6 +233,7 @@ def get(resource_name: str, __props__.__dict__["delete_recursive"] = delete_recursive __props__.__dict__["object_id"] = object_id __props__.__dict__["path"] = path + __props__.__dict__["workspace_path"] = workspace_path return Directory(resource_name, opts=opts, __props__=__props__) @property @@ -237,3 +257,11 @@ def path(self) -> pulumi.Output[str]: """ return pulumi.get(self, "path") + @property + @pulumi.getter(name="workspacePath") + def workspace_path(self) -> pulumi.Output[str]: + """ + path on Workspace File System (WSFS) in form of `/Workspace` + `path` + """ + return pulumi.get(self, "workspace_path") + diff --git a/sdk/python/pulumi_databricks/external_location.py b/sdk/python/pulumi_databricks/external_location.py index 3fb998e8..f7a31acf 100644 --- a/sdk/python/pulumi_databricks/external_location.py +++ b/sdk/python/pulumi_databricks/external_location.py @@ -422,6 +422,8 @@ def __init__(__self__, url: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: - StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. @@ -456,6 +458,8 @@ def __init__(__self__, args: ExternalLocationArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! 
+ To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: - StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. diff --git a/sdk/python/pulumi_databricks/get_current_metastore.py b/sdk/python/pulumi_databricks/get_current_metastore.py new file mode 100644 index 00000000..b2129679 --- /dev/null +++ b/sdk/python/pulumi_databricks/get_current_metastore.py @@ -0,0 +1,141 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . import _utilities +from . import outputs +from ._inputs import * + +__all__ = [ + 'GetCurrentMetastoreResult', + 'AwaitableGetCurrentMetastoreResult', + 'get_current_metastore', + 'get_current_metastore_output', +] + +@pulumi.output_type +class GetCurrentMetastoreResult: + """ + A collection of values returned by getCurrentMetastore. + """ + def __init__(__self__, id=None, metastore_info=None): + if id and not isinstance(id, str): + raise TypeError("Expected argument 'id' to be a str") + pulumi.set(__self__, "id", id) + if metastore_info and not isinstance(metastore_info, dict): + raise TypeError("Expected argument 'metastore_info' to be a dict") + pulumi.set(__self__, "metastore_info", metastore_info) + + @property + @pulumi.getter + def id(self) -> str: + """ + metastore ID. 
+ """ + return pulumi.get(self, "id") + + @property + @pulumi.getter(name="metastoreInfo") + def metastore_info(self) -> 'outputs.GetCurrentMetastoreMetastoreInfoResult': + """ + summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + """ + return pulumi.get(self, "metastore_info") + + +class AwaitableGetCurrentMetastoreResult(GetCurrentMetastoreResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetCurrentMetastoreResult( + id=self.id, + metastore_info=self.metastore_info) + + +def get_current_metastore(id: Optional[str] = None, + metastore_info: Optional[pulumi.InputType['GetCurrentMetastoreMetastoreInfoArgs']] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCurrentMetastoreResult: + """ + Retrieves information about metastore attached to a given workspace. + + > **Note** This is the workspace-level data source. + + > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + + ## Example Usage + + MetastoreSummary response for a metastore attached to the current workspace. + + ```python + import pulumi + import pulumi_databricks as databricks + + this = databricks.get_current_metastore() + pulumi.export("someMetastore", data["databricks_metastore"]["this"]["metastore_info"]) + ``` + ## Related Resources + + The following resources are used in the same context: + + * Metastore to get information for a metastore with a given ID. + * get_metastores to get a mapping of name to id of all metastores. + * Metastore to manage Metastores within Unity Catalog. + * Catalog to manage catalogs within Unity Catalog. + + + :param str id: metastore ID. 
+ :param pulumi.InputType['GetCurrentMetastoreMetastoreInfoArgs'] metastore_info: summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + """ + __args__ = dict() + __args__['id'] = id + __args__['metastoreInfo'] = metastore_info + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('databricks:index/getCurrentMetastore:getCurrentMetastore', __args__, opts=opts, typ=GetCurrentMetastoreResult).value + + return AwaitableGetCurrentMetastoreResult( + id=pulumi.get(__ret__, 'id'), + metastore_info=pulumi.get(__ret__, 'metastore_info')) + + +@_utilities.lift_output_func(get_current_metastore) +def get_current_metastore_output(id: Optional[pulumi.Input[Optional[str]]] = None, + metastore_info: Optional[pulumi.Input[Optional[pulumi.InputType['GetCurrentMetastoreMetastoreInfoArgs']]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCurrentMetastoreResult]: + """ + Retrieves information about metastore attached to a given workspace. + + > **Note** This is the workspace-level data source. + + > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute to prevent _authentication is not configured for provider_ errors. + + ## Example Usage + + MetastoreSummary response for a metastore attached to the current workspace. + + ```python + import pulumi + import pulumi_databricks as databricks + + this = databricks.get_current_metastore() + pulumi.export("someMetastore", data["databricks_metastore"]["this"]["metastore_info"]) + ``` + ## Related Resources + + The following resources are used in the same context: + + * Metastore to get information for a metastore with a given ID. 
+ * get_metastores to get a mapping of name to id of all metastores. + * Metastore to manage Metastores within Unity Catalog. + * Catalog to manage catalogs within Unity Catalog. + + + :param str id: metastore ID. + :param pulumi.InputType['GetCurrentMetastoreMetastoreInfoArgs'] metastore_info: summary about a metastore attached to the current workspace returned by [Get a metastore summary API](https://docs.databricks.com/api/workspace/metastores/summary). This contains the following attributes (check the API page for up-to-date details): + """ + ... diff --git a/sdk/python/pulumi_databricks/get_directory.py b/sdk/python/pulumi_databricks/get_directory.py index 55b0551f..bdd1e827 100644 --- a/sdk/python/pulumi_databricks/get_directory.py +++ b/sdk/python/pulumi_databricks/get_directory.py @@ -21,7 +21,7 @@ class GetDirectoryResult: """ A collection of values returned by getDirectory. """ - def __init__(__self__, id=None, object_id=None, path=None): + def __init__(__self__, id=None, object_id=None, path=None, workspace_path=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) @@ -31,6 +31,9 @@ def __init__(__self__, id=None, object_id=None, path=None): if path and not isinstance(path, str): raise TypeError("Expected argument 'path' to be a str") pulumi.set(__self__, "path", path) + if workspace_path and not isinstance(workspace_path, str): + raise TypeError("Expected argument 'workspace_path' to be a str") + pulumi.set(__self__, "workspace_path", workspace_path) @property @pulumi.getter @@ -53,6 +56,14 @@ def object_id(self) -> int: def path(self) -> str: return pulumi.get(self, "path") + @property + @pulumi.getter(name="workspacePath") + def workspace_path(self) -> str: + """ + path on Workspace File System (WSFS) in form of `/Workspace` + `path` + """ + return pulumi.get(self, "workspace_path") + class AwaitableGetDirectoryResult(GetDirectoryResult): # pylint: disable=using-constant-test @@ 
-62,7 +73,8 @@ def __await__(self): return GetDirectoryResult( id=self.id, object_id=self.object_id, - path=self.path) + path=self.path, + workspace_path=self.workspace_path) def get_directory(object_id: Optional[int] = None, @@ -95,7 +107,8 @@ def get_directory(object_id: Optional[int] = None, return AwaitableGetDirectoryResult( id=pulumi.get(__ret__, 'id'), object_id=pulumi.get(__ret__, 'object_id'), - path=pulumi.get(__ret__, 'path')) + path=pulumi.get(__ret__, 'path'), + workspace_path=pulumi.get(__ret__, 'workspace_path')) @_utilities.lift_output_func(get_directory) diff --git a/sdk/python/pulumi_databricks/get_service_principal.py b/sdk/python/pulumi_databricks/get_service_principal.py index 69cef6eb..29a601b0 100644 --- a/sdk/python/pulumi_databricks/get_service_principal.py +++ b/sdk/python/pulumi_databricks/get_service_principal.py @@ -180,7 +180,7 @@ def get_service_principal(acl_principal_id: Optional[str] = None, :param str acl_principal_id: identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. :param bool active: Whether service principal is active or not. :param str application_id: ID of the service principal. The service principal must exist before this resource can be retrieved. - :param str display_name: Display name of the service principal, e.g. `Foo SPN`. + :param str display_name: Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. :param str external_id: ID of the service principal in an external identity provider. :param str home: Home folder of the service principal, e.g. `/Users/11111111-2222-3333-4444-555666777888`. :param str id: The id of the service principal. 
@@ -258,7 +258,7 @@ def get_service_principal_output(acl_principal_id: Optional[pulumi.Input[Optiona :param str acl_principal_id: identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. :param bool active: Whether service principal is active or not. :param str application_id: ID of the service principal. The service principal must exist before this resource can be retrieved. - :param str display_name: Display name of the service principal, e.g. `Foo SPN`. + :param str display_name: Exact display name of the service principal. The service principal must exist before this resource can be retrieved. In case if there are several service principals with the same name, an error is thrown. :param str external_id: ID of the service principal in an external identity provider. :param str home: Home folder of the service principal, e.g. `/Users/11111111-2222-3333-4444-555666777888`. :param str id: The id of the service principal. diff --git a/sdk/python/pulumi_databricks/get_sql_warehouse.py b/sdk/python/pulumi_databricks/get_sql_warehouse.py index 63f7e1ce..ad7ec00d 100644 --- a/sdk/python/pulumi_databricks/get_sql_warehouse.py +++ b/sdk/python/pulumi_databricks/get_sql_warehouse.py @@ -23,7 +23,7 @@ class GetSqlWarehouseResult: """ A collection of values returned by getSqlWarehouse. 
""" - def __init__(__self__, auto_stop_mins=None, channel=None, cluster_size=None, data_source_id=None, enable_photon=None, enable_serverless_compute=None, id=None, instance_profile_arn=None, jdbc_url=None, max_num_clusters=None, min_num_clusters=None, name=None, num_clusters=None, odbc_params=None, spot_instance_policy=None, state=None, tags=None): + def __init__(__self__, auto_stop_mins=None, channel=None, cluster_size=None, creator_name=None, data_source_id=None, enable_photon=None, enable_serverless_compute=None, health=None, id=None, instance_profile_arn=None, jdbc_url=None, max_num_clusters=None, min_num_clusters=None, name=None, num_active_sessions=None, num_clusters=None, odbc_params=None, spot_instance_policy=None, state=None, tags=None, warehouse_type=None): if auto_stop_mins and not isinstance(auto_stop_mins, int): raise TypeError("Expected argument 'auto_stop_mins' to be a int") pulumi.set(__self__, "auto_stop_mins", auto_stop_mins) @@ -33,6 +33,9 @@ def __init__(__self__, auto_stop_mins=None, channel=None, cluster_size=None, dat if cluster_size and not isinstance(cluster_size, str): raise TypeError("Expected argument 'cluster_size' to be a str") pulumi.set(__self__, "cluster_size", cluster_size) + if creator_name and not isinstance(creator_name, str): + raise TypeError("Expected argument 'creator_name' to be a str") + pulumi.set(__self__, "creator_name", creator_name) if data_source_id and not isinstance(data_source_id, str): raise TypeError("Expected argument 'data_source_id' to be a str") pulumi.set(__self__, "data_source_id", data_source_id) @@ -42,6 +45,9 @@ def __init__(__self__, auto_stop_mins=None, channel=None, cluster_size=None, dat if enable_serverless_compute and not isinstance(enable_serverless_compute, bool): raise TypeError("Expected argument 'enable_serverless_compute' to be a bool") pulumi.set(__self__, "enable_serverless_compute", enable_serverless_compute) + if health and not isinstance(health, dict): + raise TypeError("Expected 
argument 'health' to be a dict") + pulumi.set(__self__, "health", health) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) @@ -60,6 +66,9 @@ def __init__(__self__, auto_stop_mins=None, channel=None, cluster_size=None, dat if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) + if num_active_sessions and not isinstance(num_active_sessions, int): + raise TypeError("Expected argument 'num_active_sessions' to be a int") + pulumi.set(__self__, "num_active_sessions", num_active_sessions) if num_clusters and not isinstance(num_clusters, int): raise TypeError("Expected argument 'num_clusters' to be a int") pulumi.set(__self__, "num_clusters", num_clusters) @@ -75,6 +84,9 @@ def __init__(__self__, auto_stop_mins=None, channel=None, cluster_size=None, dat if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) + if warehouse_type and not isinstance(warehouse_type, str): + raise TypeError("Expected argument 'warehouse_type' to be a str") + pulumi.set(__self__, "warehouse_type", warehouse_type) @property @pulumi.getter(name="autoStopMins") @@ -100,6 +112,14 @@ def cluster_size(self) -> str: """ return pulumi.get(self, "cluster_size") + @property + @pulumi.getter(name="creatorName") + def creator_name(self) -> str: + """ + The username of the user who created the endpoint. + """ + return pulumi.get(self, "creator_name") + @property @pulumi.getter(name="dataSourceId") def data_source_id(self) -> str: @@ -124,6 +144,14 @@ def enable_serverless_compute(self) -> bool: """ return pulumi.get(self, "enable_serverless_compute") + @property + @pulumi.getter + def health(self) -> 'outputs.GetSqlWarehouseHealthResult': + """ + Health status of the endpoint. 
+ """ + return pulumi.get(self, "health") + @property @pulumi.getter def id(self) -> str: @@ -169,9 +197,20 @@ def name(self) -> str: """ return pulumi.get(self, "name") + @property + @pulumi.getter(name="numActiveSessions") + def num_active_sessions(self) -> int: + """ + The current number of clusters used by the endpoint. + """ + return pulumi.get(self, "num_active_sessions") + @property @pulumi.getter(name="numClusters") def num_clusters(self) -> int: + """ + The current number of clusters used by the endpoint. + """ return pulumi.get(self, "num_clusters") @property @@ -193,6 +232,9 @@ def spot_instance_policy(self) -> str: @property @pulumi.getter def state(self) -> str: + """ + The current state of the endpoint. + """ return pulumi.get(self, "state") @property @@ -203,6 +245,14 @@ def tags(self) -> 'outputs.GetSqlWarehouseTagsResult': """ return pulumi.get(self, "tags") + @property + @pulumi.getter(name="warehouseType") + def warehouse_type(self) -> str: + """ + SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). 
+ """ + return pulumi.get(self, "warehouse_type") + class AwaitableGetSqlWarehouseResult(GetSqlWarehouseResult): # pylint: disable=using-constant-test @@ -213,39 +263,47 @@ def __await__(self): auto_stop_mins=self.auto_stop_mins, channel=self.channel, cluster_size=self.cluster_size, + creator_name=self.creator_name, data_source_id=self.data_source_id, enable_photon=self.enable_photon, enable_serverless_compute=self.enable_serverless_compute, + health=self.health, id=self.id, instance_profile_arn=self.instance_profile_arn, jdbc_url=self.jdbc_url, max_num_clusters=self.max_num_clusters, min_num_clusters=self.min_num_clusters, name=self.name, + num_active_sessions=self.num_active_sessions, num_clusters=self.num_clusters, odbc_params=self.odbc_params, spot_instance_policy=self.spot_instance_policy, state=self.state, - tags=self.tags) + tags=self.tags, + warehouse_type=self.warehouse_type) def get_sql_warehouse(auto_stop_mins: Optional[int] = None, channel: Optional[pulumi.InputType['GetSqlWarehouseChannelArgs']] = None, cluster_size: Optional[str] = None, + creator_name: Optional[str] = None, data_source_id: Optional[str] = None, enable_photon: Optional[bool] = None, enable_serverless_compute: Optional[bool] = None, + health: Optional[pulumi.InputType['GetSqlWarehouseHealthArgs']] = None, id: Optional[str] = None, instance_profile_arn: Optional[str] = None, jdbc_url: Optional[str] = None, max_num_clusters: Optional[int] = None, min_num_clusters: Optional[int] = None, name: Optional[str] = None, + num_active_sessions: Optional[int] = None, num_clusters: Optional[int] = None, odbc_params: Optional[pulumi.InputType['GetSqlWarehouseOdbcParamsArgs']] = None, spot_instance_policy: Optional[str] = None, state: Optional[str] = None, tags: Optional[pulumi.InputType['GetSqlWarehouseTagsArgs']] = None, + warehouse_type: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlWarehouseResult: """ > **Note** If you have a fully automated setup with 
workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -286,36 +344,46 @@ def get_sql_warehouse(auto_stop_mins: Optional[int] = None, :param int auto_stop_mins: Time in minutes until an idle SQL warehouse terminates all clusters and stops. :param pulumi.InputType['GetSqlWarehouseChannelArgs'] channel: block, consisting of following fields: :param str cluster_size: The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". + :param str creator_name: The username of the user who created the endpoint. :param str data_source_id: ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. :param bool enable_photon: Whether [Photon](https://databricks.com/product/delta-engine) is enabled. :param bool enable_serverless_compute: Whether this SQL warehouse is a serverless SQL warehouse. + :param pulumi.InputType['GetSqlWarehouseHealthArgs'] health: Health status of the endpoint. :param str id: The ID of the SQL warehouse. :param str jdbc_url: JDBC connection string. :param int max_num_clusters: Maximum number of clusters available when a SQL warehouse is running. :param int min_num_clusters: Minimum number of clusters available when a SQL warehouse is running. :param str name: Name of the SQL warehouse to search (case-sensitive). + :param int num_active_sessions: The current number of clusters used by the endpoint. + :param int num_clusters: The current number of clusters used by the endpoint. :param pulumi.InputType['GetSqlWarehouseOdbcParamsArgs'] odbc_params: ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. :param str spot_instance_policy: The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. 
+ :param str state: The current state of the endpoint. :param pulumi.InputType['GetSqlWarehouseTagsArgs'] tags: tags used for SQL warehouse resources. + :param str warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). """ __args__ = dict() __args__['autoStopMins'] = auto_stop_mins __args__['channel'] = channel __args__['clusterSize'] = cluster_size + __args__['creatorName'] = creator_name __args__['dataSourceId'] = data_source_id __args__['enablePhoton'] = enable_photon __args__['enableServerlessCompute'] = enable_serverless_compute + __args__['health'] = health __args__['id'] = id __args__['instanceProfileArn'] = instance_profile_arn __args__['jdbcUrl'] = jdbc_url __args__['maxNumClusters'] = max_num_clusters __args__['minNumClusters'] = min_num_clusters __args__['name'] = name + __args__['numActiveSessions'] = num_active_sessions __args__['numClusters'] = num_clusters __args__['odbcParams'] = odbc_params __args__['spotInstancePolicy'] = spot_instance_policy __args__['state'] = state __args__['tags'] = tags + __args__['warehouseType'] = warehouse_type opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('databricks:index/getSqlWarehouse:getSqlWarehouse', __args__, opts=opts, typ=GetSqlWarehouseResult).value @@ -323,40 +391,48 @@ def get_sql_warehouse(auto_stop_mins: Optional[int] = None, auto_stop_mins=pulumi.get(__ret__, 'auto_stop_mins'), channel=pulumi.get(__ret__, 'channel'), cluster_size=pulumi.get(__ret__, 'cluster_size'), + creator_name=pulumi.get(__ret__, 'creator_name'), data_source_id=pulumi.get(__ret__, 'data_source_id'), enable_photon=pulumi.get(__ret__, 'enable_photon'), enable_serverless_compute=pulumi.get(__ret__, 'enable_serverless_compute'), + health=pulumi.get(__ret__, 'health'), id=pulumi.get(__ret__, 'id'), instance_profile_arn=pulumi.get(__ret__, 
'instance_profile_arn'), jdbc_url=pulumi.get(__ret__, 'jdbc_url'), max_num_clusters=pulumi.get(__ret__, 'max_num_clusters'), min_num_clusters=pulumi.get(__ret__, 'min_num_clusters'), name=pulumi.get(__ret__, 'name'), + num_active_sessions=pulumi.get(__ret__, 'num_active_sessions'), num_clusters=pulumi.get(__ret__, 'num_clusters'), odbc_params=pulumi.get(__ret__, 'odbc_params'), spot_instance_policy=pulumi.get(__ret__, 'spot_instance_policy'), state=pulumi.get(__ret__, 'state'), - tags=pulumi.get(__ret__, 'tags')) + tags=pulumi.get(__ret__, 'tags'), + warehouse_type=pulumi.get(__ret__, 'warehouse_type')) @_utilities.lift_output_func(get_sql_warehouse) def get_sql_warehouse_output(auto_stop_mins: Optional[pulumi.Input[Optional[int]]] = None, channel: Optional[pulumi.Input[Optional[pulumi.InputType['GetSqlWarehouseChannelArgs']]]] = None, cluster_size: Optional[pulumi.Input[Optional[str]]] = None, + creator_name: Optional[pulumi.Input[Optional[str]]] = None, data_source_id: Optional[pulumi.Input[Optional[str]]] = None, enable_photon: Optional[pulumi.Input[Optional[bool]]] = None, enable_serverless_compute: Optional[pulumi.Input[Optional[bool]]] = None, + health: Optional[pulumi.Input[Optional[pulumi.InputType['GetSqlWarehouseHealthArgs']]]] = None, id: Optional[pulumi.Input[Optional[str]]] = None, instance_profile_arn: Optional[pulumi.Input[Optional[str]]] = None, jdbc_url: Optional[pulumi.Input[Optional[str]]] = None, max_num_clusters: Optional[pulumi.Input[Optional[int]]] = None, min_num_clusters: Optional[pulumi.Input[Optional[int]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, + num_active_sessions: Optional[pulumi.Input[Optional[int]]] = None, num_clusters: Optional[pulumi.Input[Optional[int]]] = None, odbc_params: Optional[pulumi.Input[Optional[pulumi.InputType['GetSqlWarehouseOdbcParamsArgs']]]] = None, spot_instance_policy: Optional[pulumi.Input[Optional[str]]] = None, state: Optional[pulumi.Input[Optional[str]]] = None, tags: 
Optional[pulumi.Input[Optional[pulumi.InputType['GetSqlWarehouseTagsArgs']]]] = None, + warehouse_type: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlWarehouseResult]: """ > **Note** If you have a fully automated setup with workspaces created by databricks_mws_workspaces, please make sure to add depends_on attribute in order to prevent _default auth: cannot configure default credentials_ errors. @@ -397,16 +473,22 @@ def get_sql_warehouse_output(auto_stop_mins: Optional[pulumi.Input[Optional[int] :param int auto_stop_mins: Time in minutes until an idle SQL warehouse terminates all clusters and stops. :param pulumi.InputType['GetSqlWarehouseChannelArgs'] channel: block, consisting of following fields: :param str cluster_size: The size of the clusters allocated to the warehouse: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". + :param str creator_name: The username of the user who created the endpoint. :param str data_source_id: ID of the data source for this warehouse. This is used to bind an Databricks SQL query to an warehouse. :param bool enable_photon: Whether [Photon](https://databricks.com/product/delta-engine) is enabled. :param bool enable_serverless_compute: Whether this SQL warehouse is a serverless SQL warehouse. + :param pulumi.InputType['GetSqlWarehouseHealthArgs'] health: Health status of the endpoint. :param str id: The ID of the SQL warehouse. :param str jdbc_url: JDBC connection string. :param int max_num_clusters: Maximum number of clusters available when a SQL warehouse is running. :param int min_num_clusters: Minimum number of clusters available when a SQL warehouse is running. :param str name: Name of the SQL warehouse to search (case-sensitive). + :param int num_active_sessions: The current number of clusters used by the endpoint. + :param int num_clusters: The current number of clusters used by the endpoint. 
:param pulumi.InputType['GetSqlWarehouseOdbcParamsArgs'] odbc_params: ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. :param str spot_instance_policy: The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. + :param str state: The current state of the endpoint. :param pulumi.InputType['GetSqlWarehouseTagsArgs'] tags: tags used for SQL warehouse resources. + :param str warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/index.html#warehouse-types) or [Azure](https://learn.microsoft.com/azure/databricks/sql/#warehouse-types). """ ... diff --git a/sdk/python/pulumi_databricks/grant.py b/sdk/python/pulumi_databricks/grant.py new file mode 100644 index 00000000..947b4cb2 --- /dev/null +++ b/sdk/python/pulumi_databricks/grant.py @@ -0,0 +1,608 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . 
import _utilities + +__all__ = ['GrantArgs', 'Grant'] + +@pulumi.input_type +class GrantArgs: + def __init__(__self__, *, + principal: pulumi.Input[str], + privileges: pulumi.Input[Sequence[pulumi.Input[str]]], + catalog: Optional[pulumi.Input[str]] = None, + external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, + function: Optional[pulumi.Input[str]] = None, + metastore: Optional[pulumi.Input[str]] = None, + model: Optional[pulumi.Input[str]] = None, + pipeline: Optional[pulumi.Input[str]] = None, + recipient: Optional[pulumi.Input[str]] = None, + schema: Optional[pulumi.Input[str]] = None, + share: Optional[pulumi.Input[str]] = None, + storage_credential: Optional[pulumi.Input[str]] = None, + table: Optional[pulumi.Input[str]] = None, + volume: Optional[pulumi.Input[str]] = None): + """ + The set of arguments for constructing a Grant resource. + """ + pulumi.set(__self__, "principal", principal) + pulumi.set(__self__, "privileges", privileges) + if catalog is not None: + pulumi.set(__self__, "catalog", catalog) + if external_location is not None: + pulumi.set(__self__, "external_location", external_location) + if foreign_connection is not None: + pulumi.set(__self__, "foreign_connection", foreign_connection) + if function is not None: + pulumi.set(__self__, "function", function) + if metastore is not None: + pulumi.set(__self__, "metastore", metastore) + if model is not None: + pulumi.set(__self__, "model", model) + if pipeline is not None: + pulumi.set(__self__, "pipeline", pipeline) + if recipient is not None: + pulumi.set(__self__, "recipient", recipient) + if schema is not None: + pulumi.set(__self__, "schema", schema) + if share is not None: + pulumi.set(__self__, "share", share) + if storage_credential is not None: + pulumi.set(__self__, "storage_credential", storage_credential) + if table is not None: + pulumi.set(__self__, "table", table) + if volume is not None: + pulumi.set(__self__, "volume", 
volume) + + @property + @pulumi.getter + def principal(self) -> pulumi.Input[str]: + return pulumi.get(self, "principal") + + @principal.setter + def principal(self, value: pulumi.Input[str]): + pulumi.set(self, "principal", value) + + @property + @pulumi.getter + def privileges(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: + return pulumi.get(self, "privileges") + + @privileges.setter + def privileges(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): + pulumi.set(self, "privileges", value) + + @property + @pulumi.getter + def catalog(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "catalog") + + @catalog.setter + def catalog(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "catalog", value) + + @property + @pulumi.getter(name="externalLocation") + def external_location(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "external_location") + + @external_location.setter + def external_location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "external_location", value) + + @property + @pulumi.getter(name="foreignConnection") + def foreign_connection(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "foreign_connection") + + @foreign_connection.setter + def foreign_connection(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "foreign_connection", value) + + @property + @pulumi.getter + def function(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "function") + + @function.setter + def function(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "function", value) + + @property + @pulumi.getter + def metastore(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "metastore") + + @metastore.setter + def metastore(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metastore", value) + + @property + @pulumi.getter + def model(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "model") + + @model.setter + def 
model(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "model", value) + + @property + @pulumi.getter + def pipeline(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "pipeline") + + @pipeline.setter + def pipeline(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "pipeline", value) + + @property + @pulumi.getter + def recipient(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "recipient") + + @recipient.setter + def recipient(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "recipient", value) + + @property + @pulumi.getter + def schema(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "schema") + + @schema.setter + def schema(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "schema", value) + + @property + @pulumi.getter + def share(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "share") + + @share.setter + def share(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "share", value) + + @property + @pulumi.getter(name="storageCredential") + def storage_credential(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "storage_credential") + + @storage_credential.setter + def storage_credential(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "storage_credential", value) + + @property + @pulumi.getter + def table(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "table") + + @table.setter + def table(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "table", value) + + @property + @pulumi.getter + def volume(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "volume") + + @volume.setter + def volume(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "volume", value) + + +@pulumi.input_type +class _GrantState: + def __init__(__self__, *, + catalog: Optional[pulumi.Input[str]] = None, + external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: 
Optional[pulumi.Input[str]] = None, + function: Optional[pulumi.Input[str]] = None, + metastore: Optional[pulumi.Input[str]] = None, + model: Optional[pulumi.Input[str]] = None, + pipeline: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + privileges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + recipient: Optional[pulumi.Input[str]] = None, + schema: Optional[pulumi.Input[str]] = None, + share: Optional[pulumi.Input[str]] = None, + storage_credential: Optional[pulumi.Input[str]] = None, + table: Optional[pulumi.Input[str]] = None, + volume: Optional[pulumi.Input[str]] = None): + """ + Input properties used for looking up and filtering Grant resources. + """ + if catalog is not None: + pulumi.set(__self__, "catalog", catalog) + if external_location is not None: + pulumi.set(__self__, "external_location", external_location) + if foreign_connection is not None: + pulumi.set(__self__, "foreign_connection", foreign_connection) + if function is not None: + pulumi.set(__self__, "function", function) + if metastore is not None: + pulumi.set(__self__, "metastore", metastore) + if model is not None: + pulumi.set(__self__, "model", model) + if pipeline is not None: + pulumi.set(__self__, "pipeline", pipeline) + if principal is not None: + pulumi.set(__self__, "principal", principal) + if privileges is not None: + pulumi.set(__self__, "privileges", privileges) + if recipient is not None: + pulumi.set(__self__, "recipient", recipient) + if schema is not None: + pulumi.set(__self__, "schema", schema) + if share is not None: + pulumi.set(__self__, "share", share) + if storage_credential is not None: + pulumi.set(__self__, "storage_credential", storage_credential) + if table is not None: + pulumi.set(__self__, "table", table) + if volume is not None: + pulumi.set(__self__, "volume", volume) + + @property + @pulumi.getter + def catalog(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "catalog") + + 
@catalog.setter + def catalog(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "catalog", value) + + @property + @pulumi.getter(name="externalLocation") + def external_location(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "external_location") + + @external_location.setter + def external_location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "external_location", value) + + @property + @pulumi.getter(name="foreignConnection") + def foreign_connection(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "foreign_connection") + + @foreign_connection.setter + def foreign_connection(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "foreign_connection", value) + + @property + @pulumi.getter + def function(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "function") + + @function.setter + def function(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "function", value) + + @property + @pulumi.getter + def metastore(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "metastore") + + @metastore.setter + def metastore(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metastore", value) + + @property + @pulumi.getter + def model(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "model") + + @model.setter + def model(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "model", value) + + @property + @pulumi.getter + def pipeline(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "pipeline") + + @pipeline.setter + def pipeline(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "pipeline", value) + + @property + @pulumi.getter + def principal(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "principal") + + @principal.setter + def principal(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "principal", value) + + @property + @pulumi.getter + def privileges(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + return pulumi.get(self, "privileges") + + @privileges.setter + def privileges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "privileges", value) + + @property + @pulumi.getter + def recipient(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "recipient") + + @recipient.setter + def recipient(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "recipient", value) + + @property + @pulumi.getter + def schema(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "schema") + + @schema.setter + def schema(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "schema", value) + + @property + @pulumi.getter + def share(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "share") + + @share.setter + def share(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "share", value) + + @property + @pulumi.getter(name="storageCredential") + def storage_credential(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "storage_credential") + + @storage_credential.setter + def storage_credential(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "storage_credential", value) + + @property + @pulumi.getter + def table(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "table") + + @table.setter + def table(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "table", value) + + @property + @pulumi.getter + def volume(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "volume") + + @volume.setter + def volume(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "volume", value) + + +class Grant(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + catalog: Optional[pulumi.Input[str]] = None, + external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: 
Optional[pulumi.Input[str]] = None, + function: Optional[pulumi.Input[str]] = None, + metastore: Optional[pulumi.Input[str]] = None, + model: Optional[pulumi.Input[str]] = None, + pipeline: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + privileges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + recipient: Optional[pulumi.Input[str]] = None, + schema: Optional[pulumi.Input[str]] = None, + share: Optional[pulumi.Input[str]] = None, + storage_credential: Optional[pulumi.Input[str]] = None, + table: Optional[pulumi.Input[str]] = None, + volume: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + Create a Grant resource with the given unique name, props, and options. + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: GrantArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Create a Grant resource with the given unique name, props, and options. + :param str resource_name: The name of the resource. + :param GrantArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... 
+ def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(GrantArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + catalog: Optional[pulumi.Input[str]] = None, + external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, + function: Optional[pulumi.Input[str]] = None, + metastore: Optional[pulumi.Input[str]] = None, + model: Optional[pulumi.Input[str]] = None, + pipeline: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + privileges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + recipient: Optional[pulumi.Input[str]] = None, + schema: Optional[pulumi.Input[str]] = None, + share: Optional[pulumi.Input[str]] = None, + storage_credential: Optional[pulumi.Input[str]] = None, + table: Optional[pulumi.Input[str]] = None, + volume: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = GrantArgs.__new__(GrantArgs) + + __props__.__dict__["catalog"] = catalog + __props__.__dict__["external_location"] = external_location + __props__.__dict__["foreign_connection"] = foreign_connection + __props__.__dict__["function"] = function + __props__.__dict__["metastore"] = metastore + __props__.__dict__["model"] = model + __props__.__dict__["pipeline"] = 
pipeline + if principal is None and not opts.urn: + raise TypeError("Missing required property 'principal'") + __props__.__dict__["principal"] = principal + if privileges is None and not opts.urn: + raise TypeError("Missing required property 'privileges'") + __props__.__dict__["privileges"] = privileges + __props__.__dict__["recipient"] = recipient + __props__.__dict__["schema"] = schema + __props__.__dict__["share"] = share + __props__.__dict__["storage_credential"] = storage_credential + __props__.__dict__["table"] = table + __props__.__dict__["volume"] = volume + super(Grant, __self__).__init__( + 'databricks:index/grant:Grant', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + catalog: Optional[pulumi.Input[str]] = None, + external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, + function: Optional[pulumi.Input[str]] = None, + metastore: Optional[pulumi.Input[str]] = None, + model: Optional[pulumi.Input[str]] = None, + pipeline: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + privileges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + recipient: Optional[pulumi.Input[str]] = None, + schema: Optional[pulumi.Input[str]] = None, + share: Optional[pulumi.Input[str]] = None, + storage_credential: Optional[pulumi.Input[str]] = None, + table: Optional[pulumi.Input[str]] = None, + volume: Optional[pulumi.Input[str]] = None) -> 'Grant': + """ + Get an existing Grant resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. 
+ """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _GrantState.__new__(_GrantState) + + __props__.__dict__["catalog"] = catalog + __props__.__dict__["external_location"] = external_location + __props__.__dict__["foreign_connection"] = foreign_connection + __props__.__dict__["function"] = function + __props__.__dict__["metastore"] = metastore + __props__.__dict__["model"] = model + __props__.__dict__["pipeline"] = pipeline + __props__.__dict__["principal"] = principal + __props__.__dict__["privileges"] = privileges + __props__.__dict__["recipient"] = recipient + __props__.__dict__["schema"] = schema + __props__.__dict__["share"] = share + __props__.__dict__["storage_credential"] = storage_credential + __props__.__dict__["table"] = table + __props__.__dict__["volume"] = volume + return Grant(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter + def catalog(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "catalog") + + @property + @pulumi.getter(name="externalLocation") + def external_location(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "external_location") + + @property + @pulumi.getter(name="foreignConnection") + def foreign_connection(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "foreign_connection") + + @property + @pulumi.getter + def function(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "function") + + @property + @pulumi.getter + def metastore(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "metastore") + + @property + @pulumi.getter + def model(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "model") + + @property + @pulumi.getter + def pipeline(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "pipeline") + + @property + @pulumi.getter + def principal(self) -> pulumi.Output[str]: + return pulumi.get(self, "principal") + + @property + @pulumi.getter + def 
privileges(self) -> pulumi.Output[Sequence[str]]: + return pulumi.get(self, "privileges") + + @property + @pulumi.getter + def recipient(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "recipient") + + @property + @pulumi.getter + def schema(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "schema") + + @property + @pulumi.getter + def share(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "share") + + @property + @pulumi.getter(name="storageCredential") + def storage_credential(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "storage_credential") + + @property + @pulumi.getter + def table(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "table") + + @property + @pulumi.getter + def volume(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "volume") + diff --git a/sdk/python/pulumi_databricks/metastore.py b/sdk/python/pulumi_databricks/metastore.py index 0d4744dd..be42cbea 100644 --- a/sdk/python/pulumi_databricks/metastore.py +++ b/sdk/python/pulumi_databricks/metastore.py @@ -523,6 +523,8 @@ def __init__(__self__, updated_by: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be used with account or workspace-level provider. + A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -555,6 +557,8 @@ def __init__(__self__, args: Optional[MetastoreArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be used with account or workspace-level provider. 
+ A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). diff --git a/sdk/python/pulumi_databricks/metastore_assignment.py b/sdk/python/pulumi_databricks/metastore_assignment.py index 213c6c85..34fe86f4 100644 --- a/sdk/python/pulumi_databricks/metastore_assignment.py +++ b/sdk/python/pulumi_databricks/metastore_assignment.py @@ -131,6 +131,8 @@ def __init__(__self__, workspace_id: Optional[pulumi.Input[int]] = None, __props__=None): """ + > **Note** This resource could be only used with account-level provider! + A single Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. ## Example Usage @@ -170,6 +172,8 @@ def __init__(__self__, args: MetastoreAssignmentArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with account-level provider! + A single Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. 
## Example Usage diff --git a/sdk/python/pulumi_databricks/metastore_data_access.py b/sdk/python/pulumi_databricks/metastore_data_access.py index d0e6e29c..54d082f2 100644 --- a/sdk/python/pulumi_databricks/metastore_data_access.py +++ b/sdk/python/pulumi_databricks/metastore_data_access.py @@ -28,7 +28,8 @@ def __init__(__self__, *, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, - read_only: Optional[pulumi.Input[bool]] = None): + read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a MetastoreDataAccess resource. :param pulumi.Input[bool] is_default: whether to set this credential as the default for the metastore. In practice, this should always be true. @@ -59,6 +60,8 @@ def __init__(__self__, *, pulumi.set(__self__, "owner", owner) if read_only is not None: pulumi.set(__self__, "read_only", read_only) + if skip_validation is not None: + pulumi.set(__self__, "skip_validation", skip_validation) @property @pulumi.getter(name="awsIamRole") @@ -180,6 +183,15 @@ def read_only(self) -> Optional[pulumi.Input[bool]]: def read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "read_only", value) + @property + @pulumi.getter(name="skipValidation") + def skip_validation(self) -> Optional[pulumi.Input[bool]]: + return pulumi.get(self, "skip_validation") + + @skip_validation.setter + def skip_validation(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "skip_validation", value) + @pulumi.input_type class _MetastoreDataAccessState: @@ -196,7 +208,8 @@ def __init__(__self__, *, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, - read_only: Optional[pulumi.Input[bool]] = None): + read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None): """ 
Input properties used for looking up and filtering MetastoreDataAccess resources. :param pulumi.Input[bool] is_default: whether to set this credential as the default for the metastore. In practice, this should always be true. @@ -227,6 +240,8 @@ def __init__(__self__, *, pulumi.set(__self__, "owner", owner) if read_only is not None: pulumi.set(__self__, "read_only", read_only) + if skip_validation is not None: + pulumi.set(__self__, "skip_validation", skip_validation) @property @pulumi.getter(name="awsIamRole") @@ -348,6 +363,15 @@ def read_only(self) -> Optional[pulumi.Input[bool]]: def read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "read_only", value) + @property + @pulumi.getter(name="skipValidation") + def skip_validation(self) -> Optional[pulumi.Input[bool]]: + return pulumi.get(self, "skip_validation") + + @skip_validation.setter + def skip_validation(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "skip_validation", value) + class MetastoreDataAccess(pulumi.CustomResource): @overload @@ -367,8 +391,11 @@ def __init__(__self__, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None, __props__=None): """ + > **Note** This resource could be used with account or workspace-level provider. + Optionally, each Metastore can have a default StorageCredential defined as `MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined. ## Import @@ -390,6 +417,8 @@ def __init__(__self__, args: Optional[MetastoreDataAccessArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be used with account or workspace-level provider. + Optionally, each Metastore can have a default StorageCredential defined as `MetastoreDataAccess`. This will be used by Unity Catalog to access data in the root storage location if defined. 
## Import @@ -428,6 +457,7 @@ def _internal_init(__self__, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -450,6 +480,7 @@ def _internal_init(__self__, __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner __props__.__dict__["read_only"] = read_only + __props__.__dict__["skip_validation"] = skip_validation super(MetastoreDataAccess, __self__).__init__( 'databricks:index/metastoreDataAccess:MetastoreDataAccess', resource_name, @@ -472,7 +503,8 @@ def get(resource_name: str, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, - read_only: Optional[pulumi.Input[bool]] = None) -> 'MetastoreDataAccess': + read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None) -> 'MetastoreDataAccess': """ Get an existing MetastoreDataAccess resource's state with the given name, id, and optional extra properties used to qualify the lookup. 
@@ -499,6 +531,7 @@ def get(resource_name: str, __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner __props__.__dict__["read_only"] = read_only + __props__.__dict__["skip_validation"] = skip_validation return MetastoreDataAccess(resource_name, opts=opts, __props__=__props__) @property @@ -569,3 +602,8 @@ def owner(self) -> pulumi.Output[str]: def read_only(self) -> pulumi.Output[Optional[bool]]: return pulumi.get(self, "read_only") + @property + @pulumi.getter(name="skipValidation") + def skip_validation(self) -> pulumi.Output[Optional[bool]]: + return pulumi.get(self, "skip_validation") + diff --git a/sdk/python/pulumi_databricks/metastore_provider.py b/sdk/python/pulumi_databricks/metastore_provider.py index 77af8672..0938880d 100644 --- a/sdk/python/pulumi_databricks/metastore_provider.py +++ b/sdk/python/pulumi_databricks/metastore_provider.py @@ -164,6 +164,8 @@ def __init__(__self__, recipient_profile_str: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. A `MetastoreProvider` is contained within Metastore and can contain a list of shares that have been shared with you. @@ -209,6 +211,8 @@ def __init__(__self__, args: MetastoreProviderArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! + Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. A `MetastoreProvider` is contained within Metastore and can contain a list of shares that have been shared with you. 
diff --git a/sdk/python/pulumi_databricks/outputs.py b/sdk/python/pulumi_databricks/outputs.py index 5e5a88ab..09603a89 100644 --- a/sdk/python/pulumi_databricks/outputs.py +++ b/sdk/python/pulumi_databricks/outputs.py @@ -255,6 +255,8 @@ 'ShareObjectPartitionValue', 'SqlAlertOptions', 'SqlEndpointChannel', + 'SqlEndpointHealth', + 'SqlEndpointHealthFailureReason', 'SqlEndpointOdbcParams', 'SqlEndpointTags', 'SqlEndpointTagsCustomTag', @@ -312,6 +314,7 @@ 'GetClusterClusterInfoInitScriptVolumesResult', 'GetClusterClusterInfoInitScriptWorkspaceResult', 'GetClusterClusterInfoTerminationReasonResult', + 'GetCurrentMetastoreMetastoreInfoResult', 'GetDbfsFilePathsPathListResult', 'GetInstancePoolPoolInfoResult', 'GetInstancePoolPoolInfoAwsAttributesResult', @@ -469,6 +472,8 @@ 'GetShareObjectPartitionResult', 'GetShareObjectPartitionValueResult', 'GetSqlWarehouseChannelResult', + 'GetSqlWarehouseHealthResult', + 'GetSqlWarehouseHealthFailureReasonResult', 'GetSqlWarehouseOdbcParamsResult', 'GetSqlWarehouseTagsResult', 'GetSqlWarehouseTagsCustomTagResult', @@ -13082,14 +13087,39 @@ def muted(self) -> Optional[bool]: @pulumi.output_type class SqlEndpointChannel(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "dbsqlVersion": + suggest = "dbsql_version" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in SqlEndpointChannel. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + SqlEndpointChannel.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + SqlEndpointChannel.__key_warning(key) + return super().get(key, default) + def __init__(__self__, *, + dbsql_version: Optional[str] = None, name: Optional[str] = None): """ :param str name: Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. 
""" + if dbsql_version is not None: + pulumi.set(__self__, "dbsql_version", dbsql_version) if name is not None: pulumi.set(__self__, "name", name) + @property + @pulumi.getter(name="dbsqlVersion") + def dbsql_version(self) -> Optional[str]: + return pulumi.get(self, "dbsql_version") + @property @pulumi.getter def name(self) -> Optional[str]: @@ -13100,46 +13130,132 @@ def name(self) -> Optional[str]: @pulumi.output_type -class SqlEndpointOdbcParams(dict): +class SqlEndpointHealth(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "failureReason": + suggest = "failure_reason" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in SqlEndpointHealth. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + SqlEndpointHealth.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + SqlEndpointHealth.__key_warning(key) + return super().get(key, default) + def __init__(__self__, *, - path: str, - port: int, - protocol: str, - host: Optional[str] = None, - hostname: Optional[str] = None): - pulumi.set(__self__, "path", path) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "protocol", protocol) - if host is not None: - pulumi.set(__self__, "host", host) - if hostname is not None: - pulumi.set(__self__, "hostname", hostname) + details: Optional[str] = None, + failure_reason: Optional['outputs.SqlEndpointHealthFailureReason'] = None, + message: Optional[str] = None, + status: Optional[str] = None, + summary: Optional[str] = None): + if details is not None: + pulumi.set(__self__, "details", details) + if failure_reason is not None: + pulumi.set(__self__, "failure_reason", failure_reason) + if message is not None: + pulumi.set(__self__, "message", message) + if status is not None: + pulumi.set(__self__, "status", status) + if summary is not None: + pulumi.set(__self__, "summary", summary) @property @pulumi.getter - def 
path(self) -> str: - return pulumi.get(self, "path") + def details(self) -> Optional[str]: + return pulumi.get(self, "details") + + @property + @pulumi.getter(name="failureReason") + def failure_reason(self) -> Optional['outputs.SqlEndpointHealthFailureReason']: + return pulumi.get(self, "failure_reason") @property @pulumi.getter - def port(self) -> int: - return pulumi.get(self, "port") + def message(self) -> Optional[str]: + return pulumi.get(self, "message") @property @pulumi.getter - def protocol(self) -> str: - return pulumi.get(self, "protocol") + def status(self) -> Optional[str]: + return pulumi.get(self, "status") + + @property + @pulumi.getter + def summary(self) -> Optional[str]: + return pulumi.get(self, "summary") + + +@pulumi.output_type +class SqlEndpointHealthFailureReason(dict): + def __init__(__self__, *, + code: Optional[str] = None, + parameters: Optional[Mapping[str, Any]] = None, + type: Optional[str] = None): + if code is not None: + pulumi.set(__self__, "code", code) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) + if type is not None: + pulumi.set(__self__, "type", type) @property @pulumi.getter - def host(self) -> Optional[str]: - return pulumi.get(self, "host") + def code(self) -> Optional[str]: + return pulumi.get(self, "code") + + @property + @pulumi.getter + def parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "parameters") + + @property + @pulumi.getter + def type(self) -> Optional[str]: + return pulumi.get(self, "type") + + +@pulumi.output_type +class SqlEndpointOdbcParams(dict): + def __init__(__self__, *, + hostname: Optional[str] = None, + path: Optional[str] = None, + port: Optional[int] = None, + protocol: Optional[str] = None): + if hostname is not None: + pulumi.set(__self__, "hostname", hostname) + if path is not None: + pulumi.set(__self__, "path", path) + if port is not None: + pulumi.set(__self__, "port", port) + if protocol is not None: + pulumi.set(__self__, 
"protocol", protocol) @property @pulumi.getter def hostname(self) -> Optional[str]: return pulumi.get(self, "hostname") + @property + @pulumi.getter + def path(self) -> Optional[str]: + return pulumi.get(self, "path") + + @property + @pulumi.getter + def port(self) -> Optional[int]: + return pulumi.get(self, "port") + + @property + @pulumi.getter + def protocol(self) -> Optional[str]: + return pulumi.get(self, "protocol") + @pulumi.output_type class SqlEndpointTags(dict): @@ -13161,12 +13277,13 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, - custom_tags: Sequence['outputs.SqlEndpointTagsCustomTag']): - pulumi.set(__self__, "custom_tags", custom_tags) + custom_tags: Optional[Sequence['outputs.SqlEndpointTagsCustomTag']] = None): + if custom_tags is not None: + pulumi.set(__self__, "custom_tags", custom_tags) @property @pulumi.getter(name="customTags") - def custom_tags(self) -> Sequence['outputs.SqlEndpointTagsCustomTag']: + def custom_tags(self) -> Optional[Sequence['outputs.SqlEndpointTagsCustomTag']]: return pulumi.get(self, "custom_tags") @@ -15642,6 +15759,225 @@ def type(self) -> Optional[str]: return pulumi.get(self, "type") +@pulumi.output_type +class GetCurrentMetastoreMetastoreInfoResult(dict): + def __init__(__self__, *, + cloud: Optional[str] = None, + created_at: Optional[int] = None, + created_by: Optional[str] = None, + default_data_access_config_id: Optional[str] = None, + delta_sharing_organization_name: Optional[str] = None, + delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None, + delta_sharing_scope: Optional[str] = None, + global_metastore_id: Optional[str] = None, + metastore_id: Optional[str] = None, + name: Optional[str] = None, + owner: Optional[str] = None, + privilege_model_version: Optional[str] = None, + region: Optional[str] = None, + storage_root: Optional[str] = None, + storage_root_credential_id: Optional[str] = None, + storage_root_credential_name: 
Optional[str] = None, + updated_at: Optional[int] = None, + updated_by: Optional[str] = None): + """ + :param int created_at: Timestamp (in milliseconds) when the current metastore was created. + :param str created_by: the ID of the identity that created the current metastore. + :param str default_data_access_config_id: the ID of the default data access configuration. + :param str delta_sharing_organization_name: The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + :param int delta_sharing_recipient_token_lifetime_in_seconds: the expiration duration in seconds on recipient data access tokens. + :param str delta_sharing_scope: Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + :param str global_metastore_id: Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + :param str metastore_id: Metastore ID. + :param str name: Name of metastore. + :param str owner: Username/group name/sp application_id of the metastore owner. + :param str privilege_model_version: the version of the privilege model used by the metastore. + :param str region: (Mandatory for account-level) The region of the metastore. + :param str storage_root: Path on cloud storage account, where managed `Table` are stored. + :param str storage_root_credential_id: ID of a storage credential used for the `storage_root`. + :param str storage_root_credential_name: Name of a storage credential used for the `storage_root`. + :param int updated_at: Timestamp (in milliseconds) when the current metastore was updated. + :param str updated_by: the ID of the identity that updated the current metastore. 
+ """ + if cloud is not None: + pulumi.set(__self__, "cloud", cloud) + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) + if created_by is not None: + pulumi.set(__self__, "created_by", created_by) + if default_data_access_config_id is not None: + pulumi.set(__self__, "default_data_access_config_id", default_data_access_config_id) + if delta_sharing_organization_name is not None: + pulumi.set(__self__, "delta_sharing_organization_name", delta_sharing_organization_name) + if delta_sharing_recipient_token_lifetime_in_seconds is not None: + pulumi.set(__self__, "delta_sharing_recipient_token_lifetime_in_seconds", delta_sharing_recipient_token_lifetime_in_seconds) + if delta_sharing_scope is not None: + pulumi.set(__self__, "delta_sharing_scope", delta_sharing_scope) + if global_metastore_id is not None: + pulumi.set(__self__, "global_metastore_id", global_metastore_id) + if metastore_id is not None: + pulumi.set(__self__, "metastore_id", metastore_id) + if name is not None: + pulumi.set(__self__, "name", name) + if owner is not None: + pulumi.set(__self__, "owner", owner) + if privilege_model_version is not None: + pulumi.set(__self__, "privilege_model_version", privilege_model_version) + if region is not None: + pulumi.set(__self__, "region", region) + if storage_root is not None: + pulumi.set(__self__, "storage_root", storage_root) + if storage_root_credential_id is not None: + pulumi.set(__self__, "storage_root_credential_id", storage_root_credential_id) + if storage_root_credential_name is not None: + pulumi.set(__self__, "storage_root_credential_name", storage_root_credential_name) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) + if updated_by is not None: + pulumi.set(__self__, "updated_by", updated_by) + + @property + @pulumi.getter + def cloud(self) -> Optional[str]: + return pulumi.get(self, "cloud") + + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[int]: + """ + 
Timestamp (in milliseconds) when the current metastore was created. + """ + return pulumi.get(self, "created_at") + + @property + @pulumi.getter(name="createdBy") + def created_by(self) -> Optional[str]: + """ + the ID of the identity that created the current metastore. + """ + return pulumi.get(self, "created_by") + + @property + @pulumi.getter(name="defaultDataAccessConfigId") + def default_data_access_config_id(self) -> Optional[str]: + """ + the ID of the default data access configuration. + """ + return pulumi.get(self, "default_data_access_config_id") + + @property + @pulumi.getter(name="deltaSharingOrganizationName") + def delta_sharing_organization_name(self) -> Optional[str]: + """ + The organization name of a Delta Sharing entity. This field is used for Databricks to Databricks sharing. + """ + return pulumi.get(self, "delta_sharing_organization_name") + + @property + @pulumi.getter(name="deltaSharingRecipientTokenLifetimeInSeconds") + def delta_sharing_recipient_token_lifetime_in_seconds(self) -> Optional[int]: + """ + the expiration duration in seconds on recipient data access tokens. + """ + return pulumi.get(self, "delta_sharing_recipient_token_lifetime_in_seconds") + + @property + @pulumi.getter(name="deltaSharingScope") + def delta_sharing_scope(self) -> Optional[str]: + """ + Used to enable delta sharing on the metastore. Valid values: INTERNAL, INTERNAL_AND_EXTERNAL. + """ + return pulumi.get(self, "delta_sharing_scope") + + @property + @pulumi.getter(name="globalMetastoreId") + def global_metastore_id(self) -> Optional[str]: + """ + Identifier in form of `::` for use in Databricks to Databricks Delta Sharing. + """ + return pulumi.get(self, "global_metastore_id") + + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> Optional[str]: + """ + Metastore ID. + """ + return pulumi.get(self, "metastore_id") + + @property + @pulumi.getter + def name(self) -> Optional[str]: + """ + Name of metastore. 
+ """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def owner(self) -> Optional[str]: + """ + Username/group name/sp application_id of the metastore owner. + """ + return pulumi.get(self, "owner") + + @property + @pulumi.getter(name="privilegeModelVersion") + def privilege_model_version(self) -> Optional[str]: + """ + the version of the privilege model used by the metastore. + """ + return pulumi.get(self, "privilege_model_version") + + @property + @pulumi.getter + def region(self) -> Optional[str]: + """ + (Mandatory for account-level) The region of the metastore. + """ + return pulumi.get(self, "region") + + @property + @pulumi.getter(name="storageRoot") + def storage_root(self) -> Optional[str]: + """ + Path on cloud storage account, where managed `Table` are stored. + """ + return pulumi.get(self, "storage_root") + + @property + @pulumi.getter(name="storageRootCredentialId") + def storage_root_credential_id(self) -> Optional[str]: + """ + ID of a storage credential used for the `storage_root`. + """ + return pulumi.get(self, "storage_root_credential_id") + + @property + @pulumi.getter(name="storageRootCredentialName") + def storage_root_credential_name(self) -> Optional[str]: + """ + Name of a storage credential used for the `storage_root`. + """ + return pulumi.get(self, "storage_root_credential_name") + + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[int]: + """ + Timestamp (in milliseconds) when the current metastore was updated. + """ + return pulumi.get(self, "updated_at") + + @property + @pulumi.getter(name="updatedBy") + def updated_by(self) -> Optional[str]: + """ + the ID of the identity that updated the current metastore. 
+ """ + return pulumi.get(self, "updated_by") + + @pulumi.output_type class GetDbfsFilePathsPathListResult(dict): def __init__(__self__, *, @@ -20866,7 +21202,7 @@ def __init__(__self__, *, :param str metastore_id: Id of the metastore to be fetched :param str name: Name of metastore. :param str owner: Username/groupname/sp application_id of the metastore owner. - :param str storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. + :param str storage_root: Path on cloud storage account, where managed `Table` are stored. """ if cloud is not None: pulumi.set(__self__, "cloud", cloud) @@ -20992,7 +21328,7 @@ def region(self) -> Optional[str]: @pulumi.getter(name="storageRoot") def storage_root(self) -> Optional[str]: """ - Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. + Path on cloud storage account, where managed `Table` are stored. """ return pulumi.get(self, "storage_root") @@ -21371,13 +21707,21 @@ def value(self) -> Optional[str]: @pulumi.output_type class GetSqlWarehouseChannelResult(dict): def __init__(__self__, *, + dbsql_version: Optional[str] = None, name: Optional[str] = None): """ :param str name: Name of the SQL warehouse to search (case-sensitive). 
""" + if dbsql_version is not None: + pulumi.set(__self__, "dbsql_version", dbsql_version) if name is not None: pulumi.set(__self__, "name", name) + @property + @pulumi.getter(name="dbsqlVersion") + def dbsql_version(self) -> Optional[str]: + return pulumi.get(self, "dbsql_version") + @property @pulumi.getter def name(self) -> Optional[str]: @@ -21388,75 +21732,147 @@ def name(self) -> Optional[str]: @pulumi.output_type -class GetSqlWarehouseOdbcParamsResult(dict): +class GetSqlWarehouseHealthResult(dict): def __init__(__self__, *, - path: str, - port: int, - protocol: str, - host: Optional[str] = None, - hostname: Optional[str] = None): - pulumi.set(__self__, "path", path) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "protocol", protocol) - if host is not None: - pulumi.set(__self__, "host", host) - if hostname is not None: - pulumi.set(__self__, "hostname", hostname) + details: Optional[str] = None, + failure_reason: Optional['outputs.GetSqlWarehouseHealthFailureReasonResult'] = None, + message: Optional[str] = None, + status: Optional[str] = None, + summary: Optional[str] = None): + if details is not None: + pulumi.set(__self__, "details", details) + if failure_reason is not None: + pulumi.set(__self__, "failure_reason", failure_reason) + if message is not None: + pulumi.set(__self__, "message", message) + if status is not None: + pulumi.set(__self__, "status", status) + if summary is not None: + pulumi.set(__self__, "summary", summary) @property @pulumi.getter - def path(self) -> str: - return pulumi.get(self, "path") + def details(self) -> Optional[str]: + return pulumi.get(self, "details") + + @property + @pulumi.getter(name="failureReason") + def failure_reason(self) -> Optional['outputs.GetSqlWarehouseHealthFailureReasonResult']: + return pulumi.get(self, "failure_reason") @property @pulumi.getter - def port(self) -> int: - return pulumi.get(self, "port") + def message(self) -> Optional[str]: + return pulumi.get(self, "message") @property 
@pulumi.getter - def protocol(self) -> str: - return pulumi.get(self, "protocol") + def status(self) -> Optional[str]: + return pulumi.get(self, "status") @property @pulumi.getter - def host(self) -> Optional[str]: - return pulumi.get(self, "host") + def summary(self) -> Optional[str]: + return pulumi.get(self, "summary") + + +@pulumi.output_type +class GetSqlWarehouseHealthFailureReasonResult(dict): + def __init__(__self__, *, + code: Optional[str] = None, + parameters: Optional[Mapping[str, Any]] = None, + type: Optional[str] = None): + if code is not None: + pulumi.set(__self__, "code", code) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) + if type is not None: + pulumi.set(__self__, "type", type) + + @property + @pulumi.getter + def code(self) -> Optional[str]: + return pulumi.get(self, "code") + + @property + @pulumi.getter + def parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "parameters") + + @property + @pulumi.getter + def type(self) -> Optional[str]: + return pulumi.get(self, "type") + + +@pulumi.output_type +class GetSqlWarehouseOdbcParamsResult(dict): + def __init__(__self__, *, + hostname: Optional[str] = None, + path: Optional[str] = None, + port: Optional[int] = None, + protocol: Optional[str] = None): + if hostname is not None: + pulumi.set(__self__, "hostname", hostname) + if path is not None: + pulumi.set(__self__, "path", path) + if port is not None: + pulumi.set(__self__, "port", port) + if protocol is not None: + pulumi.set(__self__, "protocol", protocol) @property @pulumi.getter def hostname(self) -> Optional[str]: return pulumi.get(self, "hostname") + @property + @pulumi.getter + def path(self) -> Optional[str]: + return pulumi.get(self, "path") + + @property + @pulumi.getter + def port(self) -> Optional[int]: + return pulumi.get(self, "port") + + @property + @pulumi.getter + def protocol(self) -> Optional[str]: + return pulumi.get(self, "protocol") + @pulumi.output_type class 
GetSqlWarehouseTagsResult(dict): def __init__(__self__, *, - custom_tags: Sequence['outputs.GetSqlWarehouseTagsCustomTagResult']): - pulumi.set(__self__, "custom_tags", custom_tags) + custom_tags: Optional[Sequence['outputs.GetSqlWarehouseTagsCustomTagResult']] = None): + if custom_tags is not None: + pulumi.set(__self__, "custom_tags", custom_tags) @property @pulumi.getter(name="customTags") - def custom_tags(self) -> Sequence['outputs.GetSqlWarehouseTagsCustomTagResult']: + def custom_tags(self) -> Optional[Sequence['outputs.GetSqlWarehouseTagsCustomTagResult']]: return pulumi.get(self, "custom_tags") @pulumi.output_type class GetSqlWarehouseTagsCustomTagResult(dict): def __init__(__self__, *, - key: str, - value: str): - pulumi.set(__self__, "key", key) - pulumi.set(__self__, "value", value) + key: Optional[str] = None, + value: Optional[str] = None): + if key is not None: + pulumi.set(__self__, "key", key) + if value is not None: + pulumi.set(__self__, "value", value) @property @pulumi.getter - def key(self) -> str: + def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter - def value(self) -> str: + def value(self) -> Optional[str]: return pulumi.get(self, "value") diff --git a/sdk/python/pulumi_databricks/recipient.py b/sdk/python/pulumi_databricks/recipient.py index 276d8cc0..65d759f0 100644 --- a/sdk/python/pulumi_databricks/recipient.py +++ b/sdk/python/pulumi_databricks/recipient.py @@ -299,6 +299,8 @@ def __init__(__self__, tokens: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RecipientTokenArgs']]]]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + Within a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to. A `Recipient` is contained within Metastore and can have permissions to `SELECT` from a list of shares. 
@@ -352,6 +354,8 @@ def __init__(__self__, args: RecipientArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! + Within a metastore, Unity Catalog provides the ability to create a recipient to attach delta shares to. A `Recipient` is contained within Metastore and can have permissions to `SELECT` from a list of shares. diff --git a/sdk/python/pulumi_databricks/registered_model.py b/sdk/python/pulumi_databricks/registered_model.py index b8786b02..4453fd1d 100644 --- a/sdk/python/pulumi_databricks/registered_model.py +++ b/sdk/python/pulumi_databricks/registered_model.py @@ -189,6 +189,8 @@ def __init__(__self__, storage_location: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. ## Example Usage @@ -237,6 +239,8 @@ def __init__(__self__, args: RegisteredModelArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! + This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. ## Example Usage diff --git a/sdk/python/pulumi_databricks/repo.py b/sdk/python/pulumi_databricks/repo.py index 7e3b4979..905b58c7 100644 --- a/sdk/python/pulumi_databricks/repo.py +++ b/sdk/python/pulumi_databricks/repo.py @@ -137,7 +137,8 @@ def __init__(__self__, *, path: Optional[pulumi.Input[str]] = None, sparse_checkout: Optional[pulumi.Input['RepoSparseCheckoutArgs']] = None, tag: Optional[pulumi.Input[str]] = None, - url: Optional[pulumi.Input[str]] = None): + url: Optional[pulumi.Input[str]] = None, + workspace_path: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Repo resources. 
:param pulumi.Input[str] branch: name of the branch for initial checkout. If not specified, the default branch of the repository will be used. Conflicts with `tag`. If `branch` is removed, and `tag` isn't specified, then the repository will stay at the previously checked out state. @@ -146,6 +147,7 @@ def __init__(__self__, *, :param pulumi.Input[str] path: path to put the checked out Repo. If not specified, then repo will be created in the user's repo directory (`/Repos//...`). If the value changes, repo is re-created. :param pulumi.Input[str] tag: name of the tag for initial checkout. Conflicts with `branch`. :param pulumi.Input[str] url: The URL of the Git Repository to clone from. If the value changes, repo is re-created. + :param pulumi.Input[str] workspace_path: path on Workspace File System (WSFS) in form of `/Workspace` + `path` """ if branch is not None: pulumi.set(__self__, "branch", branch) @@ -161,6 +163,8 @@ def __init__(__self__, *, pulumi.set(__self__, "tag", tag) if url is not None: pulumi.set(__self__, "url", url) + if workspace_path is not None: + pulumi.set(__self__, "workspace_path", workspace_path) @property @pulumi.getter @@ -243,6 +247,18 @@ def url(self) -> Optional[pulumi.Input[str]]: def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) + @property + @pulumi.getter(name="workspacePath") + def workspace_path(self) -> Optional[pulumi.Input[str]]: + """ + path on Workspace File System (WSFS) in form of `/Workspace` + `path` + """ + return pulumi.get(self, "workspace_path") + + @workspace_path.setter + def workspace_path(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "workspace_path", value) + class Repo(pulumi.CustomResource): @overload @@ -330,6 +346,7 @@ def _internal_init(__self__, if url is None and not opts.urn: raise TypeError("Missing required property 'url'") __props__.__dict__["url"] = url + __props__.__dict__["workspace_path"] = None super(Repo, __self__).__init__( 
'databricks:index/repo:Repo', resource_name, @@ -346,7 +363,8 @@ def get(resource_name: str, path: Optional[pulumi.Input[str]] = None, sparse_checkout: Optional[pulumi.Input[pulumi.InputType['RepoSparseCheckoutArgs']]] = None, tag: Optional[pulumi.Input[str]] = None, - url: Optional[pulumi.Input[str]] = None) -> 'Repo': + url: Optional[pulumi.Input[str]] = None, + workspace_path: Optional[pulumi.Input[str]] = None) -> 'Repo': """ Get an existing Repo resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -360,6 +378,7 @@ def get(resource_name: str, :param pulumi.Input[str] path: path to put the checked out Repo. If not specified, then repo will be created in the user's repo directory (`/Repos//...`). If the value changes, repo is re-created. :param pulumi.Input[str] tag: name of the tag for initial checkout. Conflicts with `branch`. :param pulumi.Input[str] url: The URL of the Git Repository to clone from. If the value changes, repo is re-created. 
+ :param pulumi.Input[str] workspace_path: path on Workspace File System (WSFS) in form of `/Workspace` + `path` """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -372,6 +391,7 @@ def get(resource_name: str, __props__.__dict__["sparse_checkout"] = sparse_checkout __props__.__dict__["tag"] = tag __props__.__dict__["url"] = url + __props__.__dict__["workspace_path"] = workspace_path return Repo(resource_name, opts=opts, __props__=__props__) @property @@ -427,3 +447,11 @@ def url(self) -> pulumi.Output[str]: """ return pulumi.get(self, "url") + @property + @pulumi.getter(name="workspacePath") + def workspace_path(self) -> pulumi.Output[str]: + """ + path on Workspace File System (WSFS) in form of `/Workspace` + `path` + """ + return pulumi.get(self, "workspace_path") + diff --git a/sdk/python/pulumi_databricks/schema.py b/sdk/python/pulumi_databricks/schema.py index b2568378..4162fe1d 100644 --- a/sdk/python/pulumi_databricks/schema.py +++ b/sdk/python/pulumi_databricks/schema.py @@ -289,6 +289,8 @@ def __init__(__self__, storage_root: Optional[pulumi.Input[str]] = None, __props__=None): """ + > **Note** This resource could be only used with workspace-level provider! + Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. A `Schema` is contained within Catalog and can contain tables & views. @@ -344,6 +346,8 @@ def __init__(__self__, args: SchemaArgs, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be only used with workspace-level provider! + Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. A `Schema` is contained within Catalog and can contain tables & views. 
diff --git a/sdk/python/pulumi_databricks/sql_endpoint.py b/sdk/python/pulumi_databricks/sql_endpoint.py index 99aa2669..3be8e72f 100644 --- a/sdk/python/pulumi_databricks/sql_endpoint.py +++ b/sdk/python/pulumi_databricks/sql_endpoint.py @@ -23,14 +23,10 @@ def __init__(__self__, *, enable_photon: Optional[pulumi.Input[bool]] = None, enable_serverless_compute: Optional[pulumi.Input[bool]] = None, instance_profile_arn: Optional[pulumi.Input[str]] = None, - jdbc_url: Optional[pulumi.Input[str]] = None, max_num_clusters: Optional[pulumi.Input[int]] = None, min_num_clusters: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - num_clusters: Optional[pulumi.Input[int]] = None, - odbc_params: Optional[pulumi.Input['SqlEndpointOdbcParamsArgs']] = None, spot_instance_policy: Optional[pulumi.Input[str]] = None, - state: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input['SqlEndpointTagsArgs']] = None, warehouse_type: Optional[pulumi.Input[str]] = None): """ @@ -45,14 +41,12 @@ def __init__(__self__, *, - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - **For Azure**, If omitted, the default is `false` for most workspaces. 
However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). - :param pulumi.Input[str] jdbc_url: JDBC connection string. :param pulumi.Input[int] max_num_clusters: Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. :param pulumi.Input[int] min_num_clusters: Minimum number of clusters available when a SQL warehouse is running. The default is `1`. :param pulumi.Input[str] name: Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - :param pulumi.Input['SqlEndpointOdbcParamsArgs'] odbc_params: ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. :param pulumi.Input[str] spot_instance_policy: The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. :param pulumi.Input['SqlEndpointTagsArgs'] tags: Databricks tags all endpoint resources with these tags. - :param pulumi.Input[str] warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + :param pulumi.Input[str] warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. """ pulumi.set(__self__, "cluster_size", cluster_size) if auto_stop_mins is not None: @@ -67,22 +61,14 @@ def __init__(__self__, *, pulumi.set(__self__, "enable_serverless_compute", enable_serverless_compute) if instance_profile_arn is not None: pulumi.set(__self__, "instance_profile_arn", instance_profile_arn) - if jdbc_url is not None: - pulumi.set(__self__, "jdbc_url", jdbc_url) if max_num_clusters is not None: pulumi.set(__self__, "max_num_clusters", max_num_clusters) if min_num_clusters is not None: pulumi.set(__self__, "min_num_clusters", min_num_clusters) if name is not None: pulumi.set(__self__, "name", name) - if num_clusters is not None: - pulumi.set(__self__, "num_clusters", num_clusters) - if odbc_params is not None: - pulumi.set(__self__, "odbc_params", odbc_params) if spot_instance_policy is not None: pulumi.set(__self__, "spot_instance_policy", spot_instance_policy) - if state is not None: - pulumi.set(__self__, "state", state) if tags is not None: pulumi.set(__self__, "tags", tags) if warehouse_type is not None: @@ -173,18 +159,6 @@ def instance_profile_arn(self) -> 
Optional[pulumi.Input[str]]: def instance_profile_arn(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "instance_profile_arn", value) - @property - @pulumi.getter(name="jdbcUrl") - def jdbc_url(self) -> Optional[pulumi.Input[str]]: - """ - JDBC connection string. - """ - return pulumi.get(self, "jdbc_url") - - @jdbc_url.setter - def jdbc_url(self, value: Optional[pulumi.Input[str]]): - pulumi.set(self, "jdbc_url", value) - @property @pulumi.getter(name="maxNumClusters") def max_num_clusters(self) -> Optional[pulumi.Input[int]]: @@ -221,27 +195,6 @@ def name(self) -> Optional[pulumi.Input[str]]: def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) - @property - @pulumi.getter(name="numClusters") - def num_clusters(self) -> Optional[pulumi.Input[int]]: - return pulumi.get(self, "num_clusters") - - @num_clusters.setter - def num_clusters(self, value: Optional[pulumi.Input[int]]): - pulumi.set(self, "num_clusters", value) - - @property - @pulumi.getter(name="odbcParams") - def odbc_params(self) -> Optional[pulumi.Input['SqlEndpointOdbcParamsArgs']]: - """ - ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. 
- """ - return pulumi.get(self, "odbc_params") - - @odbc_params.setter - def odbc_params(self, value: Optional[pulumi.Input['SqlEndpointOdbcParamsArgs']]): - pulumi.set(self, "odbc_params", value) - @property @pulumi.getter(name="spotInstancePolicy") def spot_instance_policy(self) -> Optional[pulumi.Input[str]]: @@ -254,15 +207,6 @@ def spot_instance_policy(self) -> Optional[pulumi.Input[str]]: def spot_instance_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "spot_instance_policy", value) - @property - @pulumi.getter - def state(self) -> Optional[pulumi.Input[str]]: - return pulumi.get(self, "state") - - @state.setter - def state(self, value: Optional[pulumi.Input[str]]): - pulumi.set(self, "state", value) - @property @pulumi.getter def tags(self) -> Optional[pulumi.Input['SqlEndpointTagsArgs']]: @@ -279,7 +223,7 @@ def tags(self, value: Optional[pulumi.Input['SqlEndpointTagsArgs']]): @pulumi.getter(name="warehouseType") def warehouse_type(self) -> Optional[pulumi.Input[str]]: """ - SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. """ return pulumi.get(self, "warehouse_type") @@ -294,14 +238,17 @@ def __init__(__self__, *, auto_stop_mins: Optional[pulumi.Input[int]] = None, channel: Optional[pulumi.Input['SqlEndpointChannelArgs']] = None, cluster_size: Optional[pulumi.Input[str]] = None, + creator_name: Optional[pulumi.Input[str]] = None, data_source_id: Optional[pulumi.Input[str]] = None, enable_photon: Optional[pulumi.Input[bool]] = None, enable_serverless_compute: Optional[pulumi.Input[bool]] = None, + healths: Optional[pulumi.Input[Sequence[pulumi.Input['SqlEndpointHealthArgs']]]] = None, instance_profile_arn: Optional[pulumi.Input[str]] = None, jdbc_url: Optional[pulumi.Input[str]] = None, max_num_clusters: Optional[pulumi.Input[int]] = None, min_num_clusters: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, + num_active_sessions: Optional[pulumi.Input[int]] = None, num_clusters: Optional[pulumi.Input[int]] = None, odbc_params: Optional[pulumi.Input['SqlEndpointOdbcParamsArgs']] = None, spot_instance_policy: Optional[pulumi.Input[str]] = None, @@ -313,6 +260,7 @@ def __init__(__self__, *, :param pulumi.Input[int] auto_stop_mins: Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop. :param pulumi.Input['SqlEndpointChannelArgs'] channel: block, consisting of following fields: :param pulumi.Input[str] cluster_size: The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". + :param pulumi.Input[str] creator_name: The username of the user who created the endpoint. 
:param pulumi.Input[str] data_source_id: ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. :param pulumi.Input[bool] enable_photon: Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. :param pulumi.Input[bool] enable_serverless_compute: Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. @@ -320,14 +268,18 @@ def __init__(__self__, *, - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. 
A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). + :param pulumi.Input[Sequence[pulumi.Input['SqlEndpointHealthArgs']]] healths: Health status of the endpoint. :param pulumi.Input[str] jdbc_url: JDBC connection string. :param pulumi.Input[int] max_num_clusters: Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. :param pulumi.Input[int] min_num_clusters: Minimum number of clusters available when a SQL warehouse is running. The default is `1`. :param pulumi.Input[str] name: Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. + :param pulumi.Input[int] num_active_sessions: The current number of clusters used by the endpoint. + :param pulumi.Input[int] num_clusters: The current number of clusters used by the endpoint. :param pulumi.Input['SqlEndpointOdbcParamsArgs'] odbc_params: ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. :param pulumi.Input[str] spot_instance_policy: The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. + :param pulumi.Input[str] state: The current state of the endpoint. :param pulumi.Input['SqlEndpointTagsArgs'] tags: Databricks tags all endpoint resources with these tags. - :param pulumi.Input[str] warehouse_type: SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + :param pulumi.Input[str] warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
""" if auto_stop_mins is not None: pulumi.set(__self__, "auto_stop_mins", auto_stop_mins) @@ -335,12 +287,16 @@ def __init__(__self__, *, pulumi.set(__self__, "channel", channel) if cluster_size is not None: pulumi.set(__self__, "cluster_size", cluster_size) + if creator_name is not None: + pulumi.set(__self__, "creator_name", creator_name) if data_source_id is not None: pulumi.set(__self__, "data_source_id", data_source_id) if enable_photon is not None: pulumi.set(__self__, "enable_photon", enable_photon) if enable_serverless_compute is not None: pulumi.set(__self__, "enable_serverless_compute", enable_serverless_compute) + if healths is not None: + pulumi.set(__self__, "healths", healths) if instance_profile_arn is not None: pulumi.set(__self__, "instance_profile_arn", instance_profile_arn) if jdbc_url is not None: @@ -351,6 +307,8 @@ def __init__(__self__, *, pulumi.set(__self__, "min_num_clusters", min_num_clusters) if name is not None: pulumi.set(__self__, "name", name) + if num_active_sessions is not None: + pulumi.set(__self__, "num_active_sessions", num_active_sessions) if num_clusters is not None: pulumi.set(__self__, "num_clusters", num_clusters) if odbc_params is not None: @@ -400,6 +358,18 @@ def cluster_size(self) -> Optional[pulumi.Input[str]]: def cluster_size(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cluster_size", value) + @property + @pulumi.getter(name="creatorName") + def creator_name(self) -> Optional[pulumi.Input[str]]: + """ + The username of the user who created the endpoint. 
+ """ + return pulumi.get(self, "creator_name") + + @creator_name.setter + def creator_name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "creator_name", value) + @property @pulumi.getter(name="dataSourceId") def data_source_id(self) -> Optional[pulumi.Input[str]]: @@ -440,6 +410,18 @@ def enable_serverless_compute(self) -> Optional[pulumi.Input[bool]]: def enable_serverless_compute(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_serverless_compute", value) + @property + @pulumi.getter + def healths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SqlEndpointHealthArgs']]]]: + """ + Health status of the endpoint. + """ + return pulumi.get(self, "healths") + + @healths.setter + def healths(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SqlEndpointHealthArgs']]]]): + pulumi.set(self, "healths", value) + @property @pulumi.getter(name="instanceProfileArn") def instance_profile_arn(self) -> Optional[pulumi.Input[str]]: @@ -497,9 +479,24 @@ def name(self) -> Optional[pulumi.Input[str]]: def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) + @property + @pulumi.getter(name="numActiveSessions") + def num_active_sessions(self) -> Optional[pulumi.Input[int]]: + """ + The current number of clusters used by the endpoint. + """ + return pulumi.get(self, "num_active_sessions") + + @num_active_sessions.setter + def num_active_sessions(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "num_active_sessions", value) + @property @pulumi.getter(name="numClusters") def num_clusters(self) -> Optional[pulumi.Input[int]]: + """ + The current number of clusters used by the endpoint. + """ return pulumi.get(self, "num_clusters") @num_clusters.setter @@ -533,6 +530,9 @@ def spot_instance_policy(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter def state(self) -> Optional[pulumi.Input[str]]: + """ + The current state of the endpoint. 
+ """ return pulumi.get(self, "state") @state.setter @@ -555,7 +555,7 @@ def tags(self, value: Optional[pulumi.Input['SqlEndpointTagsArgs']]): @pulumi.getter(name="warehouseType") def warehouse_type(self) -> Optional[pulumi.Input[str]]: """ - SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
""" return pulumi.get(self, "warehouse_type") @@ -576,14 +576,10 @@ def __init__(__self__, enable_photon: Optional[pulumi.Input[bool]] = None, enable_serverless_compute: Optional[pulumi.Input[bool]] = None, instance_profile_arn: Optional[pulumi.Input[str]] = None, - jdbc_url: Optional[pulumi.Input[str]] = None, max_num_clusters: Optional[pulumi.Input[int]] = None, min_num_clusters: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - num_clusters: Optional[pulumi.Input[int]] = None, - odbc_params: Optional[pulumi.Input[pulumi.InputType['SqlEndpointOdbcParamsArgs']]] = None, spot_instance_policy: Optional[pulumi.Input[str]] = None, - state: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[pulumi.InputType['SqlEndpointTagsArgs']]] = None, warehouse_type: Optional[pulumi.Input[str]] = None, __props__=None): @@ -642,14 +638,12 @@ def __init__(__self__, - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - **For Azure**, If omitted, the default is `false` for most workspaces. 
However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). - :param pulumi.Input[str] jdbc_url: JDBC connection string. :param pulumi.Input[int] max_num_clusters: Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. :param pulumi.Input[int] min_num_clusters: Minimum number of clusters available when a SQL warehouse is running. The default is `1`. :param pulumi.Input[str] name: Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. - :param pulumi.Input[pulumi.InputType['SqlEndpointOdbcParamsArgs']] odbc_params: ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. :param pulumi.Input[str] spot_instance_policy: The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. :param pulumi.Input[pulumi.InputType['SqlEndpointTagsArgs']] tags: Databricks tags all endpoint resources with these tags. - :param pulumi.Input[str] warehouse_type: SQL warehouse type. 
See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + :param pulumi.Input[str] warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. """ ... 
@overload @@ -722,14 +716,10 @@ def _internal_init(__self__, enable_photon: Optional[pulumi.Input[bool]] = None, enable_serverless_compute: Optional[pulumi.Input[bool]] = None, instance_profile_arn: Optional[pulumi.Input[str]] = None, - jdbc_url: Optional[pulumi.Input[str]] = None, max_num_clusters: Optional[pulumi.Input[int]] = None, min_num_clusters: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - num_clusters: Optional[pulumi.Input[int]] = None, - odbc_params: Optional[pulumi.Input[pulumi.InputType['SqlEndpointOdbcParamsArgs']]] = None, spot_instance_policy: Optional[pulumi.Input[str]] = None, - state: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[pulumi.InputType['SqlEndpointTagsArgs']]] = None, warehouse_type: Optional[pulumi.Input[str]] = None, __props__=None): @@ -750,16 +740,19 @@ def _internal_init(__self__, __props__.__dict__["enable_photon"] = enable_photon __props__.__dict__["enable_serverless_compute"] = enable_serverless_compute __props__.__dict__["instance_profile_arn"] = instance_profile_arn - __props__.__dict__["jdbc_url"] = jdbc_url __props__.__dict__["max_num_clusters"] = max_num_clusters __props__.__dict__["min_num_clusters"] = min_num_clusters __props__.__dict__["name"] = name - __props__.__dict__["num_clusters"] = num_clusters - __props__.__dict__["odbc_params"] = odbc_params __props__.__dict__["spot_instance_policy"] = spot_instance_policy - __props__.__dict__["state"] = state __props__.__dict__["tags"] = tags __props__.__dict__["warehouse_type"] = warehouse_type + __props__.__dict__["creator_name"] = None + __props__.__dict__["healths"] = None + __props__.__dict__["jdbc_url"] = None + __props__.__dict__["num_active_sessions"] = None + __props__.__dict__["num_clusters"] = None + __props__.__dict__["odbc_params"] = None + __props__.__dict__["state"] = None super(SqlEndpoint, __self__).__init__( 'databricks:index/sqlEndpoint:SqlEndpoint', resource_name, @@ -773,14 +766,17 @@ def 
get(resource_name: str, auto_stop_mins: Optional[pulumi.Input[int]] = None, channel: Optional[pulumi.Input[pulumi.InputType['SqlEndpointChannelArgs']]] = None, cluster_size: Optional[pulumi.Input[str]] = None, + creator_name: Optional[pulumi.Input[str]] = None, data_source_id: Optional[pulumi.Input[str]] = None, enable_photon: Optional[pulumi.Input[bool]] = None, enable_serverless_compute: Optional[pulumi.Input[bool]] = None, + healths: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlEndpointHealthArgs']]]]] = None, instance_profile_arn: Optional[pulumi.Input[str]] = None, jdbc_url: Optional[pulumi.Input[str]] = None, max_num_clusters: Optional[pulumi.Input[int]] = None, min_num_clusters: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, + num_active_sessions: Optional[pulumi.Input[int]] = None, num_clusters: Optional[pulumi.Input[int]] = None, odbc_params: Optional[pulumi.Input[pulumi.InputType['SqlEndpointOdbcParamsArgs']]] = None, spot_instance_policy: Optional[pulumi.Input[str]] = None, @@ -797,6 +793,7 @@ def get(resource_name: str, :param pulumi.Input[int] auto_stop_mins: Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop. :param pulumi.Input[pulumi.InputType['SqlEndpointChannelArgs']] channel: block, consisting of following fields: :param pulumi.Input[str] cluster_size: The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large". + :param pulumi.Input[str] creator_name: The username of the user who created the endpoint. :param pulumi.Input[str] data_source_id: ID of the data source for this endpoint. This is used to bind an Databricks SQL query to an endpoint. :param pulumi.Input[bool] enable_photon: Whether to enable [Photon](https://databricks.com/product/delta-engine). This field is optional and is enabled by default. 
:param pulumi.Input[bool] enable_serverless_compute: Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. @@ -804,14 +801,18 @@ def get(resource_name: str, - **For AWS**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated [terms of use](https://docs.databricks.com/sql/admin/serverless.html#accept-terms), workspace admins are prompted in the Databricks SQL UI. A workspace must meet the [requirements](https://docs.databricks.com/sql/admin/serverless.html#requirements) and might require an update to its instance profile role to [add a trust relationship](https://docs.databricks.com/sql/admin/serverless.html#aws-instance-profile-setup). - **For Azure**, If omitted, the default is `false` for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to `true` if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the [requirements](https://learn.microsoft.com/azure/databricks/sql/admin/serverless) and might require an update to its [Azure storage firewall](https://learn.microsoft.com/azure/databricks/sql/admin/serverless-firewall). + :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlEndpointHealthArgs']]]] healths: Health status of the endpoint. :param pulumi.Input[str] jdbc_url: JDBC connection string. 
:param pulumi.Input[int] max_num_clusters: Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to `1`. :param pulumi.Input[int] min_num_clusters: Minimum number of clusters available when a SQL warehouse is running. The default is `1`. :param pulumi.Input[str] name: Name of the Databricks SQL release channel. Possible values are: `CHANNEL_NAME_PREVIEW` and `CHANNEL_NAME_CURRENT`. Default is `CHANNEL_NAME_CURRENT`. + :param pulumi.Input[int] num_active_sessions: The current number of clusters used by the endpoint. + :param pulumi.Input[int] num_clusters: The current number of clusters used by the endpoint. :param pulumi.Input[pulumi.InputType['SqlEndpointOdbcParamsArgs']] odbc_params: ODBC connection params: `odbc_params.hostname`, `odbc_params.path`, `odbc_params.protocol`, and `odbc_params.port`. :param pulumi.Input[str] spot_instance_policy: The spot policy to use for allocating instances to clusters: `COST_OPTIMIZED` or `RELIABILITY_OPTIMIZED`. This field is optional. Default is `COST_OPTIMIZED`. + :param pulumi.Input[str] state: The current state of the endpoint. :param pulumi.Input[pulumi.InputType['SqlEndpointTagsArgs']] tags: Databricks tags all endpoint resources with these tags. - :param pulumi.Input[str] warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. 
+ :param pulumi.Input[str] warehouse_type: SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -820,14 +821,17 @@ def get(resource_name: str, __props__.__dict__["auto_stop_mins"] = auto_stop_mins __props__.__dict__["channel"] = channel __props__.__dict__["cluster_size"] = cluster_size + __props__.__dict__["creator_name"] = creator_name __props__.__dict__["data_source_id"] = data_source_id __props__.__dict__["enable_photon"] = enable_photon __props__.__dict__["enable_serverless_compute"] = enable_serverless_compute + __props__.__dict__["healths"] = healths __props__.__dict__["instance_profile_arn"] = instance_profile_arn __props__.__dict__["jdbc_url"] = jdbc_url __props__.__dict__["max_num_clusters"] = max_num_clusters __props__.__dict__["min_num_clusters"] = min_num_clusters __props__.__dict__["name"] = name + __props__.__dict__["num_active_sessions"] = num_active_sessions __props__.__dict__["num_clusters"] = num_clusters __props__.__dict__["odbc_params"] = odbc_params __props__.__dict__["spot_instance_policy"] = spot_instance_policy @@ -860,6 +864,14 @@ def cluster_size(self) -> pulumi.Output[str]: """ return pulumi.get(self, "cluster_size") + @property + @pulumi.getter(name="creatorName") + def creator_name(self) -> pulumi.Output[str]: + """ + The username of the user who created the endpoint. 
+ """ + return pulumi.get(self, "creator_name") + @property @pulumi.getter(name="dataSourceId") def data_source_id(self) -> pulumi.Output[str]: @@ -888,6 +900,14 @@ def enable_serverless_compute(self) -> pulumi.Output[Optional[bool]]: """ return pulumi.get(self, "enable_serverless_compute") + @property + @pulumi.getter + def healths(self) -> pulumi.Output[Sequence['outputs.SqlEndpointHealth']]: + """ + Health status of the endpoint. + """ + return pulumi.get(self, "healths") + @property @pulumi.getter(name="instanceProfileArn") def instance_profile_arn(self) -> pulumi.Output[Optional[str]]: @@ -925,9 +945,20 @@ def name(self) -> pulumi.Output[str]: """ return pulumi.get(self, "name") + @property + @pulumi.getter(name="numActiveSessions") + def num_active_sessions(self) -> pulumi.Output[int]: + """ + The current number of clusters used by the endpoint. + """ + return pulumi.get(self, "num_active_sessions") + @property @pulumi.getter(name="numClusters") - def num_clusters(self) -> pulumi.Output[Optional[int]]: + def num_clusters(self) -> pulumi.Output[int]: + """ + The current number of clusters used by the endpoint. + """ return pulumi.get(self, "num_clusters") @property @@ -949,6 +980,9 @@ def spot_instance_policy(self) -> pulumi.Output[Optional[str]]: @property @pulumi.getter def state(self) -> pulumi.Output[str]: + """ + The current state of the endpoint. + """ return pulumi.get(self, "state") @property @@ -963,7 +997,7 @@ def tags(self) -> pulumi.Output[Optional['outputs.SqlEndpointTags']]: @pulumi.getter(name="warehouseType") def warehouse_type(self) -> pulumi.Output[Optional[str]]: """ - SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. 
If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. + SQL warehouse type. See for [AWS](https://docs.databricks.com/sql/admin/sql-endpoints.html#switch-the-sql-warehouse-type-pro-classic-or-serverless) or [Azure](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/create-sql-warehouse#--upgrade-a-pro-or-classic-sql-warehouse-to-a-serverless-sql-warehouse). Set to `PRO` or `CLASSIC`. If the field `enable_serverless_compute` has the value `true` either explicitly or through the default logic (see that field above for details), the default is `PRO`, which is required for serverless SQL warehouses. Otherwise, the default is `CLASSIC`. """ return pulumi.get(self, "warehouse_type") diff --git a/sdk/python/pulumi_databricks/storage_credential.py b/sdk/python/pulumi_databricks/storage_credential.py index 288c4fda..d316231a 100644 --- a/sdk/python/pulumi_databricks/storage_credential.py +++ b/sdk/python/pulumi_databricks/storage_credential.py @@ -27,7 +27,8 @@ def __init__(__self__, *, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, - read_only: Optional[pulumi.Input[bool]] = None): + read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a StorageCredential resource. :param pulumi.Input[bool] force_destroy: Delete storage credential regardless of its dependencies. @@ -37,6 +38,7 @@ def __init__(__self__, *, :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the storage credential owner. 
:param pulumi.Input[bool] read_only: Indicates whether the storage credential is only usable for read operations. + :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the storage credential. """ if aws_iam_role is not None: pulumi.set(__self__, "aws_iam_role", aws_iam_role) @@ -62,6 +64,8 @@ def __init__(__self__, *, pulumi.set(__self__, "owner", owner) if read_only is not None: pulumi.set(__self__, "read_only", read_only) + if skip_validation is not None: + pulumi.set(__self__, "skip_validation", skip_validation) @property @pulumi.getter(name="awsIamRole") @@ -188,6 +192,18 @@ def read_only(self) -> Optional[pulumi.Input[bool]]: def read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "read_only", value) + @property + @pulumi.getter(name="skipValidation") + def skip_validation(self) -> Optional[pulumi.Input[bool]]: + """ + Suppress validation errors if any & force save the storage credential. + """ + return pulumi.get(self, "skip_validation") + + @skip_validation.setter + def skip_validation(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "skip_validation", value) + @pulumi.input_type class _StorageCredentialState: @@ -203,7 +219,8 @@ def __init__(__self__, *, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, - read_only: Optional[pulumi.Input[bool]] = None): + read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None): """ Input properties used for looking up and filtering StorageCredential resources. :param pulumi.Input[bool] force_destroy: Delete storage credential regardless of its dependencies. @@ -213,6 +230,7 @@ def __init__(__self__, *, :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. 
:param pulumi.Input[str] owner: Username/groupname/sp application_id of the storage credential owner. :param pulumi.Input[bool] read_only: Indicates whether the storage credential is only usable for read operations. + :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the storage credential. """ if aws_iam_role is not None: pulumi.set(__self__, "aws_iam_role", aws_iam_role) @@ -238,6 +256,8 @@ def __init__(__self__, *, pulumi.set(__self__, "owner", owner) if read_only is not None: pulumi.set(__self__, "read_only", read_only) + if skip_validation is not None: + pulumi.set(__self__, "skip_validation", skip_validation) @property @pulumi.getter(name="awsIamRole") @@ -364,6 +384,18 @@ def read_only(self) -> Optional[pulumi.Input[bool]]: def read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "read_only", value) + @property + @pulumi.getter(name="skipValidation") + def skip_validation(self) -> Optional[pulumi.Input[bool]]: + """ + Suppress validation errors if any & force save the storage credential. + """ + return pulumi.get(self, "skip_validation") + + @skip_validation.setter + def skip_validation(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "skip_validation", value) + class StorageCredential(pulumi.CustomResource): @overload @@ -382,8 +414,11 @@ def __init__(__self__, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None, __props__=None): """ + > **Note** This resource could be used with account or workspace-level provider. + To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: - `StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). 
Storage credentials are access-controlled to determine which users can use the credential. @@ -461,6 +496,7 @@ def __init__(__self__, :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the storage credential owner. :param pulumi.Input[bool] read_only: Indicates whether the storage credential is only usable for read operations. + :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the storage credential. """ ... @overload @@ -469,6 +505,8 @@ def __init__(__self__, args: Optional[StorageCredentialArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ + > **Note** This resource could be used with account or workspace-level provider. + To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: - `StorageCredential` represents authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal/managed identity for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential. 
@@ -564,6 +602,7 @@ def _internal_init(__self__, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -585,6 +624,7 @@ def _internal_init(__self__, __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner __props__.__dict__["read_only"] = read_only + __props__.__dict__["skip_validation"] = skip_validation super(StorageCredential, __self__).__init__( 'databricks:index/storageCredential:StorageCredential', resource_name, @@ -606,7 +646,8 @@ def get(resource_name: str, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, - read_only: Optional[pulumi.Input[bool]] = None) -> 'StorageCredential': + read_only: Optional[pulumi.Input[bool]] = None, + skip_validation: Optional[pulumi.Input[bool]] = None) -> 'StorageCredential': """ Get an existing StorageCredential resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -621,6 +662,7 @@ def get(resource_name: str, :param pulumi.Input[str] name: Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the storage credential owner. :param pulumi.Input[bool] read_only: Indicates whether the storage credential is only usable for read operations. + :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the storage credential. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -638,6 +680,7 @@ def get(resource_name: str, __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner __props__.__dict__["read_only"] = read_only + __props__.__dict__["skip_validation"] = skip_validation return StorageCredential(resource_name, opts=opts, __props__=__props__) @property @@ -717,3 +760,11 @@ def read_only(self) -> pulumi.Output[Optional[bool]]: """ return pulumi.get(self, "read_only") + @property + @pulumi.getter(name="skipValidation") + def skip_validation(self) -> pulumi.Output[Optional[bool]]: + """ + Suppress validation errors if any & force save the storage credential. + """ + return pulumi.get(self, "skip_validation") + diff --git a/sdk/python/pulumi_databricks/system_schema.py b/sdk/python/pulumi_databricks/system_schema.py index 51e7d6fa..9954d395 100644 --- a/sdk/python/pulumi_databricks/system_schema.py +++ b/sdk/python/pulumi_databricks/system_schema.py @@ -114,8 +114,7 @@ def __init__(__self__, """ > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). - > **Notes** - Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. + > **Note** This resource could be only used with workspace-level provider! Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. @@ -152,8 +151,7 @@ def __init__(__self__, """ > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). - > **Notes** - Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. + > **Note** This resource could be only used with workspace-level provider! Manages system tables enablement. 
System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. diff --git a/sdk/python/pulumi_databricks/volume.py b/sdk/python/pulumi_databricks/volume.py index 91307874..25341d85 100644 --- a/sdk/python/pulumi_databricks/volume.py +++ b/sdk/python/pulumi_databricks/volume.py @@ -264,6 +264,8 @@ def __init__(__self__, """ > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). + > **Note** This resource could be only used with workspace-level provider! + Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. A volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog. @@ -342,6 +344,8 @@ def __init__(__self__, """ > **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). + > **Note** This resource could be only used with workspace-level provider! + Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. 
A volume resides in the third layer of Unity Catalog’s three-level namespace. Volumes are siblings to tables, views, and other objects organized under a schema in Unity Catalog.