From 436fe6630663b8604ee62c2b9b0532363bc39cab Mon Sep 17 00:00:00 2001 From: Johan Andersson Date: Tue, 19 Nov 2024 15:13:01 +0100 Subject: [PATCH] Add support for Azure BYOK exocompute (#205) --- docs/data-sources/azure_permissions.md | 1 + .../aws_private_container_registry.md | 13 +- .../azure_exocompute_cluster_attachment.md | 39 ++++ .../azure_private_container_registry.md | 115 ++++++++++ docs/resources/azure_subscription.md | 27 ++- go.mod | 2 +- go.sum | 4 +- .../provider/data_source_azure_permissions.go | 26 ++- internal/provider/names.go | 2 + internal/provider/provider.go | 2 + .../resource_aws_archival_location.go | 2 +- internal/provider/resource_aws_exocompute.go | 32 +-- ...ource_aws_exocompute_cluster_attachment.go | 20 +- ...resource_aws_private_container_registry.go | 38 +-- .../resource_azure_archival_location.go | 4 +- .../provider/resource_azure_exocompute.go | 28 ++- ...rce_azure_exocompute_cluster_attachment.go | 170 ++++++++++++++ ...source_azure_private_container_registry.go | 217 ++++++++++++++++++ .../provider/resource_azure_subscription.go | 178 ++++++++++++-- ...data_center_archival_location_amazon_s3.go | 2 +- 20 files changed, 822 insertions(+), 100 deletions(-) create mode 100644 docs/resources/azure_exocompute_cluster_attachment.md create mode 100644 docs/resources/azure_private_container_registry.md create mode 100644 internal/provider/resource_azure_exocompute_cluster_attachment.go create mode 100644 internal/provider/resource_azure_private_container_registry.go diff --git a/docs/data-sources/azure_permissions.md b/docs/data-sources/azure_permissions.md index fd253b7..592dd7d 100644 --- a/docs/data-sources/azure_permissions.md +++ b/docs/data-sources/azure_permissions.md @@ -103,6 +103,7 @@ resource "polaris_azure_subscription" "subscription" { - `feature` (String) RSC feature. Note that the feature name must be given in the `EXAMPLE_FEATURE_NAME` style. 
Possible values are `AZURE_SQL_DB_PROTECTION`, `AZURE_SQL_MI_PROTECTION`, `CLOUD_NATIVE_ARCHIVAL`, `CLOUD_NATIVE_ARCHIVAL_ENCRYPTION`, `CLOUD_NATIVE_BLOB_PROTECTION`, `CLOUD_NATIVE_PROTECTION` and `EXOCOMPUTE`. - `features` (Set of String, Deprecated) RSC features. Possible values are `AZURE_SQL_DB_PROTECTION`, `AZURE_SQL_MI_PROTECTION`, `CLOUD_NATIVE_ARCHIVAL`, `CLOUD_NATIVE_ARCHIVAL_ENCRYPTION`, `CLOUD_NATIVE_BLOB_PROTECTION`, `CLOUD_NATIVE_PROTECTION` and `EXOCOMPUTE`. **Deprecated:** use `feature` instead. +- `permission_groups` (Set of String) Permission groups for the RSC feature. Possible values are `BASIC`, `EXPORT_AND_RESTORE`, `FILE_LEVEL_RECOVERY`, `CLOUD_CLUSTER_ES`, `SNAPSHOT_PRIVATE_ACCESS`, `PRIVATE_ENDPOINTS`, `CUSTOMER_MANAGED_BASIC`, `ENCRYPTION`, `SQL_ARCHIVAL`, `RECOVERY` and `BACKUP_V2`. ### Read-Only diff --git a/docs/resources/aws_private_container_registry.md b/docs/resources/aws_private_container_registry.md index 118ab81..983c307 100644 --- a/docs/resources/aws_private_container_registry.md +++ b/docs/resources/aws_private_container_registry.md @@ -5,11 +5,7 @@ subcategory: "" description: |- The polaris_aws_private_container_registry resource enables the private container registry (PCR) feature for the RSC customer account. This disables the standard - Rubrik container registry. Once PCR has been enabled, it can only be disabled by - Rubrik customer support. - !> Note: Creating a polaris_aws_private_container_registry resource enables - the PCR feature for the RSC customer account. Destroying the resource will not - disabled PCR, it can only be disabled by contacting Rubrik customer support. + Rubrik container registry. ~> Note: Even though the polaris_aws_private_container_registry resource ID is an RSC cloud account ID, there can only be a single PCR per RSC customer account. 
@@ -70,12 +66,7 @@ description: |- The `polaris_aws_private_container_registry` resource enables the private container registry (PCR) feature for the RSC customer account. This disables the standard -Rubrik container registry. Once PCR has been enabled, it can only be disabled by -Rubrik customer support. - -!> **Note:** Creating a `polaris_aws_private_container_registry` resource enables - the PCR feature for the RSC customer account. Destroying the resource will not - disabled PCR, it can only be disabled by contacting Rubrik customer support. +Rubrik container registry. ~> **Note:** Even though the `polaris_aws_private_container_registry` resource ID is an RSC cloud account ID, there can only be a single PCR per RSC customer diff --git a/docs/resources/azure_exocompute_cluster_attachment.md b/docs/resources/azure_exocompute_cluster_attachment.md new file mode 100644 index 0000000..eb75908 --- /dev/null +++ b/docs/resources/azure_exocompute_cluster_attachment.md @@ -0,0 +1,39 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "polaris_azure_exocompute_cluster_attachment Resource - terraform-provider-polaris" +subcategory: "" +description: |- + The polaris_azure_exocompute_cluster_attachment resource attaches an Azure AKS + cluster to a customer managed host Exocompute configuration, allowing RSC to use + the cluster for Exocompute operations. + The cluster name must be specified as /, e.g. + my-resource-group/my-cluster. +--- + +# polaris_azure_exocompute_cluster_attachment (Resource) + +The `polaris_azure_exocompute_cluster_attachment` resource attaches an Azure AKS +cluster to a customer managed host Exocompute configuration, allowing RSC to use +the cluster for Exocompute operations. + +The cluster name must be specified as `/`, e.g. +`my-resource-group/my-cluster`. + + + + +## Schema + +### Required + +- `cluster_name` (String) Azure AKS cluster name. Changing this forces a new resource to be created. 
+- `exocompute_id` (String) RSC exocompute configuration ID (UUID). Changing this forces a new resource to be created. + +### Optional + +- `token_refresh` (Number) To force a refresh of the authentication token, part of the connection command and manifest, increase the value of this field. The token is valid for 24 hours. + +### Read-Only + +- `id` (String) RSC cluster ID (UUID). +- `manifest` (String) Kubernetes manifest which can be passed to `kubectl apply` to create a connection between the cluster and RSC. See `connection_command` for an alternative connection method. diff --git a/docs/resources/azure_private_container_registry.md b/docs/resources/azure_private_container_registry.md new file mode 100644 index 0000000..fa781eb --- /dev/null +++ b/docs/resources/azure_private_container_registry.md @@ -0,0 +1,115 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "polaris_azure_private_container_registry Resource - terraform-provider-polaris" +subcategory: "" +description: |- + The polaris_azure_private_container_registry resource enables the private + container registry (PCR) feature for the RSC customer account. This disables the + standard Rubrik container registry. + ~> Note: Even though the polaris_azure_private_container_registry resource + ID is an RSC cloud account ID, there can only be a single PCR per RSC + customer account. + Exocompute Image Bundles + The following GraphQL query can be used to retrieve information about the image + bundles used by RSC for exocompute: + graphql + query ExotaskImageBundle { + exotaskImageBundle { + bundleImages { + name + sha + tag + } + bundleVersion + eksVersion + repoUrl + } + } + + The repoUrl field holds the URL to the RSC container registry from where the + RSC images can be pulled. + The following GraphQL mutation can be used to set the approved bundle version + for the RSC customer account: + graphql + mutation SetBundleApprovalStatus($input: SetBundleApprovalStatusInput!) 
{ + setBundleApprovalStatus(input: $input) + } + + The input is an object with the following structure: + json + { + "input": { + "approvalStatus": "APPROVED", + "bundleVersion": "1.164", + } + } + + Where approvalStatus can be either APPROVED or REJECTED. bundleVersion + is the the bundle version being approved or rejected. bundleMetadata is + optional. +--- + +# polaris_azure_private_container_registry (Resource) + +The `polaris_azure_private_container_registry` resource enables the private +container registry (PCR) feature for the RSC customer account. This disables the +standard Rubrik container registry. + +~> **Note:** Even though the `polaris_azure_private_container_registry` resource + ID is an RSC cloud account ID, there can only be a single PCR per RSC + customer account. + +## Exocompute Image Bundles +The following GraphQL query can be used to retrieve information about the image +bundles used by RSC for exocompute: +```graphql +query ExotaskImageBundle { + exotaskImageBundle { + bundleImages { + name + sha + tag + } + bundleVersion + eksVersion + repoUrl + } +} +``` +The `repoUrl` field holds the URL to the RSC container registry from where the +RSC images can be pulled. + +The following GraphQL mutation can be used to set the approved bundle version +for the RSC customer account: +```graphql +mutation SetBundleApprovalStatus($input: SetBundleApprovalStatusInput!) { + setBundleApprovalStatus(input: $input) +} +``` +The input is an object with the following structure: +```json +{ + "input": { + "approvalStatus": "APPROVED", + "bundleVersion": "1.164", + } +} +``` +Where `approvalStatus` can be either `APPROVED` or `REJECTED`. `bundleVersion` +is the the bundle version being approved or rejected. `bundleMetadata` is +optional. + + + + +## Schema + +### Required + +- `app_id` (String) Azure app registration application ID. Also known as the client ID. +- `cloud_account_id` (String) RSC cloud account ID (UUID). 
Changing this forces a new resource to be created. +- `url` (String) URL for customer provided private container registry. + +### Read-Only + +- `id` (String) RSC cloud account ID (UUID). diff --git a/docs/resources/azure_subscription.md b/docs/resources/azure_subscription.md index 2e80344..fc6a63b 100644 --- a/docs/resources/azure_subscription.md +++ b/docs/resources/azure_subscription.md @@ -12,13 +12,15 @@ description: |- for disaster recovery and long-term retention. 2. cloud_native_archival_encryption - Allows cloud archival locations to be encrypted with customer managed keys. - 3. cloud_native_protection - Provides protection for Azure virtual machines and + 3. cloud_native_blob_protection - Provides protection for Azure Blob Storage + through the rules and policies of SLA Domains. + 4. cloud_native_protection - Provides protection for Azure virtual machines and managed disks through the rules and policies of SLA Domains. - 4. exocompute - Provides snapshot indexing, file recovery, storage tiering, and + 5. exocompute - Provides snapshot indexing, file recovery, storage tiering, and application-consistent protection of Azure objects. - 5. sql_db_protection - Provides centralized database backup management and + 6. sql_db_protection - Provides centralized database backup management and recovery in an Azure SQL Database deployment. - 6. sql_mi_protection - Provides centralized database backup management and + 7. sql_mi_protection - Provides centralized database backup management and recovery for an Azure SQL Managed Instance deployment. Each feature's permissions field can be used with the polaris_azure_permissions data source to inform RSC about permission updates when the Terraform configuration @@ -48,13 +50,15 @@ Any combination of different RSC features can be enabled for a subscription: for disaster recovery and long-term retention. 2. `cloud_native_archival_encryption` - Allows cloud archival locations to be encrypted with customer managed keys. - 3. 
`cloud_native_protection` - Provides protection for Azure virtual machines and + 3. `cloud_native_blob_protection` - Provides protection for Azure Blob Storage + through the rules and policies of SLA Domains. + 4. `cloud_native_protection` - Provides protection for Azure virtual machines and managed disks through the rules and policies of SLA Domains. - 4. `exocompute` - Provides snapshot indexing, file recovery, storage tiering, and + 5. `exocompute` - Provides snapshot indexing, file recovery, storage tiering, and application-consistent protection of Azure objects. - 5. `sql_db_protection` - Provides centralized database backup management and + 6. `sql_db_protection` - Provides centralized database backup management and recovery in an Azure SQL Database deployment. - 6. `sql_mi_protection` - Provides centralized database backup management and + 7. `sql_mi_protection` - Provides centralized database backup management and recovery for an Azure SQL Managed Instance deployment. Each feature's `permissions` field can be used with the `polaris_azure_permissions` @@ -173,6 +177,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the Cloud Native Archival feature. Possible values are `BASIC`, `ENCRYPTION` and `SQL_ARCHIVAL`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. - `resource_group_name` (String) Name of the Azure resource group where RSC places all resources created by the feature. RSC assumes the resource group already exists. Changing this forces the RSC feature to be re-onboarded. - `resource_group_region` (String) Region of the Azure resource group. Should be specified in the standard Azure style, e.g. `eastus`. Changing this forces the RSC feature to be re-onboarded. 
@@ -196,6 +201,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the Cloud Native Archival Encryption feature. Possible values are `BASIC` and `ENCRYPTION`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. - `resource_group_name` (String) Name of the Azure resource group where RSC places all resources created by the feature. RSC assumes the resource group already exists. Changing this forces the RSC feature to be re-onboarded. - `resource_group_region` (String) Region of the Azure resource group. Should be specified in the standard Azure style, e.g. `eastus`. Changing this forces the RSC feature to be re-onboarded. @@ -215,6 +221,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the Cloud Native Blob Protection feature. Possible values are `BASIC` and `RECOVERY`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. Read-Only: @@ -231,6 +238,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the Cloud Native Protection feature. Possible values are `BASIC`, `EXPORT_AND_RESTORE`, `FILE_LEVEL_RECOVERY`, `CLOUD_CLUSTER_ES` and `SNAPSHOT_PRIVATE_ACCESS`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. - `resource_group_name` (String) Name of the Azure resource group where RSC places all resources created by the feature. RSC assumes the resource group already exists. 
Changing this forces the RSC feature to be re-onboarded. - `resource_group_region` (String) Region of the Azure resource group. Should be specified in the standard Azure style, e.g. `eastus`. Changing this forces the RSC feature to be re-onboarded. @@ -250,6 +258,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the Exocompute feature. Possible values are `BASIC`, `PRIVATE_ENDPOINTS` and `CUSTOMER_MANAGED_BASIC`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. - `resource_group_name` (String) Name of the Azure resource group where RSC places all resources created by the feature. RSC assumes the resource group already exists. Changing this forces the RSC feature to be re-onboarded. - `resource_group_region` (String) Region of the Azure resource group. Should be specified in the standard Azure style, e.g. `eastus`. Changing this forces the RSC feature to be re-onboarded. @@ -269,6 +278,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the SQL DB Protection feature. Possible values are `BASIC`, `RECOVERY` and `BACKUP_V2`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. Read-Only: @@ -285,6 +295,7 @@ Required: Optional: +- `permission_groups` (Set of String) Permission groups to assign to the SQL MI Protection feature. Possible values are `BASIC`, `RECOVERY` and `BACKUP_V2`. - `permissions` (String) Permissions updated signal. When this field changes, the provider will notify RSC that the permissions for the feature has been updated. Use this field with the `polaris_azure_permissions` data source. 
Read-Only: diff --git a/go.mod b/go.mod index f453b75..2a99cc1 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-docs v0.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 - github.com/rubrikinc/rubrik-polaris-sdk-for-go v0.11.0-beta.7 + github.com/rubrikinc/rubrik-polaris-sdk-for-go v0.11.0-beta.8 ) require ( diff --git a/go.sum b/go.sum index 18014cd..fab329a 100644 --- a/go.sum +++ b/go.sum @@ -270,8 +270,8 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rubrikinc/rubrik-polaris-sdk-for-go v0.11.0-beta.7 h1:n3tfrj61TLoKKkywcboFjEb/sd+G53vJV13dfV14k3Q= -github.com/rubrikinc/rubrik-polaris-sdk-for-go v0.11.0-beta.7/go.mod h1:ryJGDKlbaCvozY3Wvt+TPSN2OZRChQedHUNsnVfCbXE= +github.com/rubrikinc/rubrik-polaris-sdk-for-go v0.11.0-beta.8 h1:dU2PQJUQ4G0FcdivN2Y0/vYsu/9hZRvAYld2I9Tqvro= +github.com/rubrikinc/rubrik-polaris-sdk-for-go v0.11.0-beta.8/go.mod h1:ryJGDKlbaCvozY3Wvt+TPSN2OZRChQedHUNsnVfCbXE= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= diff --git a/internal/provider/data_source_azure_permissions.go b/internal/provider/data_source_azure_permissions.go index 69bf5b1..fe55a7a 100644 --- a/internal/provider/data_source_azure_permissions.go +++ b/internal/provider/data_source_azure_permissions.go @@ -159,6 +159,23 @@ func dataSourceAzurePermissions() 
*schema.Resource { "`resource_group_not_data_actions` instead.", Deprecated: "use `subscription_not_data_actions` and `resource_group_not_data_actions` instead.", }, + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "EXPORT_AND_RESTORE", "FILE_LEVEL_RECOVERY", "CLOUD_CLUSTER_ES", + "SNAPSHOT_PRIVATE_ACCESS", "PRIVATE_ENDPOINTS", "CUSTOMER_MANAGED_BASIC", + "ENCRYPTION", "SQL_ARCHIVAL", "RECOVERY", "BACKUP_V2", + }, false), + }, + Optional: true, + ConflictsWith: []string{keyFeatures}, + Description: "Permission groups for the RSC feature. Possible values are `BASIC`, " + + "`EXPORT_AND_RESTORE`, `FILE_LEVEL_RECOVERY`, `CLOUD_CLUSTER_ES`, `SNAPSHOT_PRIVATE_ACCESS`, " + + "`PRIVATE_ENDPOINTS`, `CUSTOMER_MANAGED_BASIC`, `ENCRYPTION`, `SQL_ARCHIVAL`, `RECOVERY` and " + + "`BACKUP_V2`.", + }, keyResourceGroupActions: { Type: schema.TypeList, Elem: &schema.Schema{ @@ -238,8 +255,13 @@ func azurePermissionsRead(ctx context.Context, d *schema.ResourceData, m any) di // Check both feature and features. 
var perms []azure.Permissions var groups []azure.PermissionGroupWithVersion - if f := d.Get(keyFeature).(string); f != "" { - perms, groups, err = azure.Wrap(client).ScopedPermissions(ctx, core.Feature{Name: f}) + if featureName := d.Get(keyFeature).(string); featureName != "" { + var permGroups []core.PermissionGroup + for _, permGroup := range d.Get(keyPermissionGroups).(*schema.Set).List() { + permGroups = append(permGroups, core.PermissionGroup(permGroup.(string))) + } + feature := core.Feature{Name: featureName, PermissionGroups: permGroups} + perms, groups, err = azure.Wrap(client).ScopedPermissions(ctx, feature) } else { var features []core.Feature for _, f := range d.Get(keyFeatures).(*schema.Set).List() { diff --git a/internal/provider/names.go b/internal/provider/names.go index 0aadd7c..7c5092a 100644 --- a/internal/provider/names.go +++ b/internal/provider/names.go @@ -111,7 +111,9 @@ const ( keyPolarisAWSPrivateContainerRegistry = "polaris_aws_private_container_registry" keyPolarisAzureArchivalLocation = "polaris_azure_archival_location" keyPolarisAzureExocompute = "polaris_azure_exocompute" + keyPolarisAzureExocomputeClusterAttachment = "polaris_azure_exocompute_cluster_attachment" keyPolarisAzurePermissions = "polaris_azure_permissions" + keyPolarisAzurePrivateContainerRegistry = "polaris_azure_private_container_registry" keyPolarisAzureServicePrincipal = "polaris_azure_service_principal" keyPolarisAzureSubscription = "polaris_azure_subscription" keyPolarisCustomRole = "polaris_custom_role" diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 1f87cd8..1164dce 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -82,6 +82,8 @@ func Provider() *schema.Provider { keyPolarisAWSPrivateContainerRegistry: resourceAwsPrivateContainerRegistry(), keyPolarisAzureArchivalLocation: resourceAzureArchivalLocation(), keyPolarisAzureExocompute: resourceAzureExocompute(), + 
keyPolarisAzureExocomputeClusterAttachment: resourceAzureExocomputeClusterAttachment(), + keyPolarisAzurePrivateContainerRegistry: resourceAzurePrivateContainerRegistry(), keyPolarisAzureServicePrincipal: resourceAzureServicePrincipal(), keyPolarisAzureSubscription: resourceAzureSubscription(), "polaris_cdm_bootstrap": resourceCDMBootstrap(), diff --git a/internal/provider/resource_aws_archival_location.go b/internal/provider/resource_aws_archival_location.go index 8cdabb2..80e6bcc 100644 --- a/internal/provider/resource_aws_archival_location.go +++ b/internal/provider/resource_aws_archival_location.go @@ -124,7 +124,7 @@ func resourceAwsArchivalLocation() *schema.Resource { ForceNew: true, Description: "AWS region to store the snapshots in. If not specified, the snapshots will be " + "stored in the same region as the workload. Changing this forces a new resource to be created.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(aws.AllRegionNames(), false), }, keyStorageClass: { Type: schema.TypeString, diff --git a/internal/provider/resource_aws_exocompute.go b/internal/provider/resource_aws_exocompute.go index 72a7f3d..838e408 100644 --- a/internal/provider/resource_aws_exocompute.go +++ b/internal/provider/resource_aws_exocompute.go @@ -30,8 +30,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/aws" + "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/exocompute" "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql" + gqlaws "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql/aws" ) const resourceAWSExocomputeDescription = ` @@ -131,7 +132,7 @@ func resourceAwsExocompute() *schema.Resource { ConflictsWith: []string{"host_account_id"}, Description: "AWS region to run the Exocompute 
instance in. Changing this forces a new resource " + "to be created.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(gqlaws.AllRegionNames(), false), }, keySubnets: { Type: schema.TypeSet, @@ -178,7 +179,7 @@ func awsCreateExocompute(ctx context.Context, d *schema.ResourceData, m interfac if err != nil { return diag.FromErr(err) } - err = aws.Wrap(client).MapExocompute(ctx, aws.CloudAccountID(hostID), aws.CloudAccountID(accountID)) + err = exocompute.Wrap(client).MapAWSCloudAccount(ctx, accountID, hostID) if err != nil { return diag.FromErr(err) } @@ -195,19 +196,19 @@ func awsCreateExocompute(ctx context.Context, d *schema.ResourceData, m interfac // Note that Managed and Unmanaged below refer to whether the security // groups are managed by RSC or not, and not the cluster. - var config aws.ExoConfigFunc + var config exocompute.AWSConfigurationFunc switch { case region != "" && vpcID != "" && len(subnets) > 0 && clusterSecurityGroupID != "" && nodeSecurityGroupID != "": - config = aws.Unmanaged(region, vpcID, subnets, clusterSecurityGroupID, nodeSecurityGroupID) + config = exocompute.AWSUnmanaged(gqlaws.RegionFromName(region), vpcID, subnets, clusterSecurityGroupID, nodeSecurityGroupID) case region != "" && vpcID != "" && len(subnets) > 0: - config = aws.Managed(region, vpcID, subnets) + config = exocompute.AWSManaged(gqlaws.RegionFromName(region), vpcID, subnets) case region != "": - config = aws.BYOKCluster(region) + config = exocompute.AWSBYOKCluster(gqlaws.RegionFromName(region)) default: return diag.Errorf("invalid exocompute configuration") } - id, err := aws.Wrap(client).AddExocomputeConfig(ctx, aws.CloudAccountID(accountID), config) + id, err := exocompute.Wrap(client).AddAWSConfiguration(ctx, accountID, config) if err != nil { return diag.FromErr(err) } @@ -232,7 +233,7 @@ func awsReadExocompute(ctx context.Context, d *schema.ResourceData, m interface{ if err != nil { return diag.FromErr(err) } - hostID, err := 
aws.Wrap(client).ExocomputeHostAccount(ctx, aws.CloudAccountID(appID)) + hostID, err := exocompute.Wrap(client).AWSHostCloudAccount(ctx, appID) if errors.Is(err, graphql.ErrNotFound) { d.SetId("") return nil @@ -249,7 +250,7 @@ func awsReadExocompute(ctx context.Context, d *schema.ResourceData, m interface{ if err != nil { return diag.FromErr(err) } - exoConfig, err := aws.Wrap(client).ExocomputeConfig(ctx, configID) + exoConfig, err := exocompute.Wrap(client).AWSConfigurationByID(ctx, configID) if errors.Is(err, graphql.ErrNotFound) { d.SetId("") return nil @@ -269,13 +270,12 @@ func awsReadExocompute(ctx context.Context, d *schema.ResourceData, m interface{ if err := d.Set(keyNodeSecurityGroupID, exoConfig.NodeSecurityGroupID); err != nil { return diag.FromErr(err) } - if err := d.Set(keyPolarisManaged, exoConfig.ManagedByRubrik); err != nil { + if err := d.Set(keyPolarisManaged, exoConfig.IsManagedByRubrik); err != nil { return diag.FromErr(err) } subnets := schema.Set{F: schema.HashString} - for _, subnet := range exoConfig.Subnets { - subnets.Add(subnet.ID) - } + subnets.Add(exoConfig.Subnet1) + subnets.Add(exoConfig.Subnet2) if err := d.Set(keySubnets, &subnets); err != nil { return diag.FromErr(err) } @@ -301,7 +301,7 @@ func awsDeleteExocompute(ctx context.Context, d *schema.ResourceData, m interfac if err != nil { return diag.FromErr(err) } - if err = aws.Wrap(client).UnmapExocompute(ctx, aws.CloudAccountID(appID)); err != nil { + if err = exocompute.Wrap(client).UnmapAWSCloudAccount(ctx, appID); err != nil { return diag.FromErr(err) } } else { @@ -309,7 +309,7 @@ func awsDeleteExocompute(ctx context.Context, d *schema.ResourceData, m interfac if err != nil { return diag.FromErr(err) } - if err = aws.Wrap(client).RemoveExocomputeConfig(ctx, configID); err != nil { + if err = exocompute.Wrap(client).RemoveAWSConfiguration(ctx, configID); err != nil { return diag.FromErr(err) } } diff --git a/internal/provider/resource_aws_exocompute_cluster_attachment.go 
b/internal/provider/resource_aws_exocompute_cluster_attachment.go index e07e9e8..a05c65f 100644 --- a/internal/provider/resource_aws_exocompute_cluster_attachment.go +++ b/internal/provider/resource_aws_exocompute_cluster_attachment.go @@ -28,7 +28,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/aws" + "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/exocompute" ) const awsExocomputeClusterAttachmentDescription = ` @@ -116,7 +116,7 @@ func awsCreateAwsExocomputeClusterAttachment(ctx context.Context, d *schema.Reso } clusterName := d.Get(keyClusterName).(string) - clusterID, kubectlCmd, setupYAML, err := aws.Wrap(client).AddExocomputeCluster(ctx, configID, clusterName) + clusterID, info, err := exocompute.Wrap(client).ConnectAWSCluster(ctx, clusterName, configID) if err != nil { return diag.FromErr(err) } @@ -124,16 +124,16 @@ func awsCreateAwsExocomputeClusterAttachment(ctx context.Context, d *schema.Reso // We initialize the resource fields in create instead of calling read, // this is because the read operation will fail until the customer runs the // connection command or applies the manifest. 
- if err := d.Set(keyConnectionCommand, kubectlCmd); err != nil { + if err := d.Set(keyConnectionCommand, info.Command); err != nil { return diag.FromErr(err) } if err := d.Set(keyConnectionCommandExecuted, false); err != nil { return diag.FromErr(err) } - if err := d.Set(keyManifest, setupYAML); err != nil { + if err := d.Set(keyManifest, info.Manifest); err != nil { return diag.FromErr(err) } - if err := d.Set(keySetupYAML, setupYAML); err != nil { + if err := d.Set(keySetupYAML, info.Manifest); err != nil { return diag.FromErr(err) } @@ -155,21 +155,21 @@ func awsReadAwsExocomputeClusterAttachment(ctx context.Context, d *schema.Resour } clusterName := d.Get(keyClusterName).(string) - kubectlCmd, setupYAML, err := aws.Wrap(client).ExocomputeCluster(ctx, configID, clusterName) + info, err := exocompute.Wrap(client).AWSClusterConnection(ctx, clusterName, configID) if err != nil { log.Printf("[INFO] failed to read cluster attachment: %s", err) return nil } - if err := d.Set(keyConnectionCommand, kubectlCmd); err != nil { + if err := d.Set(keyConnectionCommand, info.Command); err != nil { return diag.FromErr(err) } if err := d.Set(keyConnectionCommandExecuted, true); err != nil { return diag.FromErr(err) } - if err := d.Set(keyManifest, setupYAML); err != nil { + if err := d.Set(keyManifest, info.Manifest); err != nil { return diag.FromErr(err) } - if err := d.Set(keySetupYAML, setupYAML); err != nil { + if err := d.Set(keySetupYAML, info.Manifest); err != nil { return diag.FromErr(err) } @@ -199,7 +199,7 @@ func awsDeleteAwsExocomputeClusterAttachment(ctx context.Context, d *schema.Reso return diag.FromErr(err) } - if err := aws.Wrap(client).RemoveExocomputeCluster(ctx, id); err != nil { + if err := exocompute.Wrap(client).DisconnectAWSCluster(ctx, id); err != nil { return diag.FromErr(err) } diff --git a/internal/provider/resource_aws_private_container_registry.go b/internal/provider/resource_aws_private_container_registry.go index ca7ceda..d278118 100644 --- 
a/internal/provider/resource_aws_private_container_registry.go +++ b/internal/provider/resource_aws_private_container_registry.go @@ -28,18 +28,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/aws" + "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/pcr" ) const awsPrivateContainerRegistryDescription = ` The ´polaris_aws_private_container_registry´ resource enables the private container registry (PCR) feature for the RSC customer account. This disables the standard -Rubrik container registry. Once PCR has been enabled, it can only be disabled by -Rubrik customer support. - -!> **Note:** Creating a ´polaris_aws_private_container_registry´ resource enables - the PCR feature for the RSC customer account. Destroying the resource will not - disabled PCR, it can only be disabled by contacting Rubrik customer support. +Rubrik container registry. ~> **Note:** Even though the ´polaris_aws_private_container_registry´ resource ID is an RSC cloud account ID, there can only be a single PCR per RSC customer @@ -152,7 +147,7 @@ func awsCreatePrivateContainerRegistry(ctx context.Context, d *schema.ResourceDa } nativeID := d.Get(keyNativeID).(string) url := d.Get(keyURL).(string) - if err := aws.Wrap(client).SetPrivateContainerRegistry(ctx, aws.CloudAccountID(id), url, nativeID); err != nil { + if err := pcr.Wrap(client).SetAWSRegistry(ctx, id, nativeID, url); err != nil { return diag.FromErr(err) } @@ -161,7 +156,6 @@ func awsCreatePrivateContainerRegistry(ctx context.Context, d *schema.ResourceDa return nil } -// There is no API endpoint to read the state of the private container registry. 
func awsReadPrivateContainerRegistry(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Print("[TRACE] awsReadPrivateContainerRegistry") @@ -174,15 +168,15 @@ func awsReadPrivateContainerRegistry(ctx context.Context, d *schema.ResourceData if err != nil { return diag.FromErr(err) } - nativeID, url, err := aws.Wrap(client).PrivateContainerRegistry(ctx, aws.CloudAccountID(id)) + pcrInfo, err := pcr.Wrap(client).AWSRegistry(ctx, id) if err != nil { return diag.FromErr(err) } - if err := d.Set(keyNativeID, nativeID); err != nil { + if err := d.Set(keyNativeID, pcrInfo.PCRDetails.ImagePullDetails.NativeID); err != nil { return diag.FromErr(err) } - if err := d.Set(keyURL, url); err != nil { + if err := d.Set(keyURL, pcrInfo.PCRDetails.RegistryURL); err != nil { return diag.FromErr(err) } @@ -203,16 +197,30 @@ func awsUpdatePrivateContainerRegistry(ctx context.Context, d *schema.ResourceDa } nativeID := d.Get(keyNativeID).(string) url := d.Get(keyURL).(string) - if err := aws.Wrap(client).SetPrivateContainerRegistry(ctx, aws.CloudAccountID(id), url, nativeID); err != nil { + if err := pcr.Wrap(client).SetAWSRegistry(ctx, id, nativeID, url); err != nil { return diag.FromErr(err) } return nil } -// There is no API endpoint to remove the private container registry from the -// account. 
func awsDeletePrivateContainerRegistry(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Print("[TRACE] awsDeletePrivateContainerRegistry") + + client, err := m.(*client).polaris() + if err != nil { + return diag.FromErr(err) + } + + id, err := uuid.Parse(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if err := pcr.Wrap(client).RemoveRegistry(ctx, id); err != nil { + return diag.FromErr(err) + } + + d.SetId("") return nil } diff --git a/internal/provider/resource_azure_archival_location.go b/internal/provider/resource_azure_archival_location.go index d8b0469..f36b038 100644 --- a/internal/provider/resource_azure_archival_location.go +++ b/internal/provider/resource_azure_archival_location.go @@ -133,7 +133,7 @@ func resourceAzureArchivalLocation() *schema.Resource { ForceNew: true, Description: "Azure region to store the snapshots in. If not specified, the snapshots will be stored " + "in the same region as the workload. Changing this forces a new resource to be created.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(azure.AllRegionNames(), false), }, keyStorageAccountTags: { Type: schema.TypeMap, @@ -317,7 +317,7 @@ func customerKeyResource() *schema.Resource { Required: true, Description: "The region in which the key will be used. 
Regions without customer managed keys will " + "use platform managed keys.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(azure.AllRegionNames(), false), }, keyVaultName: { Type: schema.TypeString, diff --git a/internal/provider/resource_azure_exocompute.go b/internal/provider/resource_azure_exocompute.go index 7db226e..3d3255a 100644 --- a/internal/provider/resource_azure_exocompute.go +++ b/internal/provider/resource_azure_exocompute.go @@ -30,8 +30,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/azure" + "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/exocompute" "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql" + gqlazure "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql/azure" ) const resourceAzureExocomputeDescription = ` @@ -111,7 +112,7 @@ func resourceAzureExocompute() *schema.Resource { ForceNew: true, Description: "Azure region to run the exocompute service in. Should be specified in the standard " + "Azure style, e.g. `eastus`. 
Changing this forces a new resource to be created.",
-				ValidateFunc: validation.StringIsNotWhiteSpace,
+				ValidateFunc: validation.StringInSlice(gqlazure.AllRegionNames(), false),
 			},
 			keySubnet: {
 				Type:         schema.TypeString,
@@ -164,20 +165,23 @@ func azureCreateExocompute(ctx context.Context, d *schema.ResourceData, m interf
 		if err != nil {
 			return diag.FromErr(err)
 		}
-		err = azure.Wrap(client).MapExocompute(ctx, azure.CloudAccountID(hostCloudAccountID), azure.CloudAccountID(accountID))
+		err = exocompute.Wrap(client).MapAzureCloudAccount(ctx, accountID, hostCloudAccountID)
 		if err != nil {
 			return diag.FromErr(err)
 		}
 		d.SetId(appCloudAccountPrefix + accountID.String())
 	} else {
-		var exoConfig azure.ExoConfigFunc
+		var exoConfig exocompute.AzureConfigurationFunc
+		region := gqlazure.RegionFromName(d.Get(keyRegion).(string))
 		if podOverlayNetworkCIDR, ok := d.GetOk(keyPodOverlayNetworkCIDR); ok {
-			exoConfig = azure.ManagedWithOverlayNetwork(d.Get(keyRegion).(string), d.Get(keySubnet).(string),
+			exoConfig = exocompute.AzureManagedWithOverlayNetwork(region, d.Get(keySubnet).(string),
 				podOverlayNetworkCIDR.(string))
+		} else if subnet, ok := d.GetOk(keySubnet); ok {
+			exoConfig = exocompute.AzureManaged(region, subnet.(string))
 		} else {
-			exoConfig = azure.Managed(d.Get(keyRegion).(string), d.Get(keySubnet).(string))
+			exoConfig = exocompute.AzureBYOKCluster(region)
 		}
-		exoConfigID, err := azure.Wrap(client).AddExocomputeConfig(ctx, azure.CloudAccountID(accountID), exoConfig)
+		exoConfigID, err := exocompute.Wrap(client).AddAzureConfiguration(ctx, accountID, exoConfig)
 		if err != nil {
 			return diag.FromErr(err)
 		}
@@ -202,7 +206,7 @@ func azureReadExocompute(ctx context.Context, d *schema.ResourceData, m interfac
 		return diag.FromErr(err)
 	}
 
-	hostID, err := azure.Wrap(client).ExocomputeHostAccount(ctx, azure.CloudAccountID(appID))
+	hostID, err := exocompute.Wrap(client).AzureHostCloudAccount(ctx, appID)
 	if errors.Is(err, graphql.ErrNotFound) {
 		d.SetId("")
 		return nil
@@ -220,7 
+224,7 @@ func azureReadExocompute(ctx context.Context, d *schema.ResourceData, m interfac return diag.FromErr(err) } - exoConfig, err := azure.Wrap(client).ExocomputeConfig(ctx, exoConfigID) + exoConfig, err := exocompute.Wrap(client).AzureConfigurationByID(ctx, exoConfigID) if errors.Is(err, graphql.ErrNotFound) { d.SetId("") return nil @@ -229,7 +233,7 @@ func azureReadExocompute(ctx context.Context, d *schema.ResourceData, m interfac return diag.FromErr(err) } - if err := d.Set(keyRegion, exoConfig.Region); err != nil { + if err := d.Set(keyRegion, exoConfig.Region.Name()); err != nil { return diag.FromErr(err) } if err := d.Set(keySubnet, exoConfig.SubnetID); err != nil { @@ -256,7 +260,7 @@ func azureDeleteExocompute(ctx context.Context, d *schema.ResourceData, m interf if err != nil { return diag.FromErr(err) } - err = azure.Wrap(client).UnmapExocompute(ctx, azure.CloudAccountID(appID)) + err = exocompute.Wrap(client).UnmapAzureCloudAccount(ctx, appID) if err != nil { return diag.FromErr(err) } @@ -266,7 +270,7 @@ func azureDeleteExocompute(ctx context.Context, d *schema.ResourceData, m interf return diag.FromErr(err) } - err = azure.Wrap(client).RemoveExocomputeConfig(ctx, exoConfigID) + err = exocompute.Wrap(client).RemoveAzureConfiguration(ctx, exoConfigID) if err != nil { return diag.FromErr(err) } diff --git a/internal/provider/resource_azure_exocompute_cluster_attachment.go b/internal/provider/resource_azure_exocompute_cluster_attachment.go new file mode 100644 index 0000000..e376d15 --- /dev/null +++ b/internal/provider/resource_azure_exocompute_cluster_attachment.go @@ -0,0 +1,170 @@ +// Copyright 2024 Rubrik, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +package provider + +import ( + "context" + "log" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/exocompute" +) + +const azureExocomputeClusterAttachmentDescription = ` +The ´polaris_azure_exocompute_cluster_attachment´ resource attaches an Azure AKS +cluster to a customer managed host Exocompute configuration, allowing RSC to use +the cluster for Exocompute operations. + +The cluster name must be specified as ´/´, e.g. +´my-resource-group/my-cluster´. 
+` + +func resourceAzureExocomputeClusterAttachment() *schema.Resource { + return &schema.Resource{ + CreateContext: azureCreateAwsExocomputeClusterAttachment, + ReadContext: azureReadAwsExocomputeClusterAttachment, + UpdateContext: azureUpdateAwsExocomputeClusterAttachment, + DeleteContext: azureDeleteAwsExocomputeClusterAttachment, + + Description: description(azureExocomputeClusterAttachmentDescription), + Schema: map[string]*schema.Schema{ + keyID: { + Type: schema.TypeString, + Computed: true, + Description: "RSC cluster ID (UUID).", + }, + keyClusterName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Azure AKS cluster name. Changing this forces a new resource to be created.", + ValidateFunc: validation.StringIsNotWhiteSpace, + }, + keyExocomputeID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "RSC exocompute configuration ID (UUID). Changing this forces a new resource to be " + + "created.", + ValidateFunc: validation.IsUUID, + }, + keyManifest: { + Type: schema.TypeString, + Computed: true, + Description: "Kubernetes manifest which can be passed to `kubectl apply` to create a connection " + + "between the cluster and RSC. See `" + keyConnectionCommand + "` for an alternative connection " + + "method.", + }, + keyTokenRefresh: { + Type: schema.TypeInt, + Optional: true, + Description: "To force a refresh of the authentication token, part of the connection command and " + + "manifest, increase the value of this field. 
The token is valid for 24 hours.",
+			},
+		},
+	}
+}
+
+func azureCreateAwsExocomputeClusterAttachment(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] azureCreateAwsExocomputeClusterAttachment")
+
+	client, err := m.(*client).polaris()
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	configID, err := uuid.Parse(d.Get(keyExocomputeID).(string))
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	clusterName := d.Get(keyClusterName).(string)
+
+	clusterID, info, err := exocompute.Wrap(client).ConnectAzureCluster(ctx, clusterName, configID)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set(keyManifest, info.Manifest); err != nil {
+		return diag.FromErr(err)
+	}
+
+	d.SetId(clusterID.String())
+	return nil
+}
+
+func azureReadAwsExocomputeClusterAttachment(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] azureReadAwsExocomputeClusterAttachment")
+
+	client, err := m.(*client).polaris()
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	configID, err := uuid.Parse(d.Get(keyExocomputeID).(string))
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	clusterName := d.Get(keyClusterName).(string)
+
+	info, err := exocompute.Wrap(client).AzureClusterConnection(ctx, clusterName, configID)
+	if err != nil {
+		log.Printf("[INFO] failed to read cluster attachment: %s", err)
+		return nil
+	}
+	if err := d.Set(keyManifest, info.Manifest); err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}
+
+func azureUpdateAwsExocomputeClusterAttachment(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] azureUpdateAwsExocomputeClusterAttachment")
+
+	if d.HasChange(keyTokenRefresh) {
+		return azureCreateAwsExocomputeClusterAttachment(ctx, d, m)
+	}
+
+	return nil
+}
+
+func azureDeleteAwsExocomputeClusterAttachment(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] 
azureDeleteAwsExocomputeClusterAttachment") + + client, err := m.(*client).polaris() + if err != nil { + return diag.FromErr(err) + } + + id, err := uuid.Parse(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if err := exocompute.Wrap(client).DisconnectAzureCluster(ctx, id); err != nil { + return diag.FromErr(err) + } + + d.SetId("") + return nil +} diff --git a/internal/provider/resource_azure_private_container_registry.go b/internal/provider/resource_azure_private_container_registry.go new file mode 100644 index 0000000..330b90c --- /dev/null +++ b/internal/provider/resource_azure_private_container_registry.go @@ -0,0 +1,217 @@ +// Copyright 2024 Rubrik, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +package provider + +import ( + "context" + "log" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/pcr" +) + +const azurePrivateContainerRegistryDescription = ` +The ´polaris_azure_private_container_registry´ resource enables the private +container registry (PCR) feature for the RSC customer account. This disables the +standard Rubrik container registry. + +~> **Note:** Even though the ´polaris_azure_private_container_registry´ resource + ID is an RSC cloud account ID, there can only be a single PCR per RSC + customer account. + +## Exocompute Image Bundles +The following GraphQL query can be used to retrieve information about the image +bundles used by RSC for exocompute: +´´´graphql +query ExotaskImageBundle { + exotaskImageBundle { + bundleImages { + name + sha + tag + } + bundleVersion + eksVersion + repoUrl + } +} +´´´ +The ´repoUrl´ field holds the URL to the RSC container registry from where the +RSC images can be pulled. + +The following GraphQL mutation can be used to set the approved bundle version +for the RSC customer account: +´´´graphql +mutation SetBundleApprovalStatus($input: SetBundleApprovalStatusInput!) { + setBundleApprovalStatus(input: $input) +} +´´´ +The input is an object with the following structure: +´´´json +{ + "input": { + "approvalStatus": "APPROVED", + "bundleVersion": "1.164", + } +} +´´´ +Where ´approvalStatus´ can be either ´APPROVED´ or ´REJECTED´. ´bundleVersion´ +is the the bundle version being approved or rejected. ´bundleMetadata´ is +optional. 
+`
+
+func resourceAzurePrivateContainerRegistry() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: azureCreatePrivateContainerRegistry,
+		ReadContext:   azureReadPrivateContainerRegistry,
+		UpdateContext: azureUpdatePrivateContainerRegistry,
+		DeleteContext: azureDeletePrivateContainerRegistry,
+
+		Description: description(azurePrivateContainerRegistryDescription),
+		Schema: map[string]*schema.Schema{
+			keyID: {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "RSC cloud account ID (UUID).",
+			},
+			keyCloudAccountID: {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				Description:  "RSC cloud account ID (UUID). Changing this forces a new resource to be created.",
+				ValidateFunc: validation.IsUUID,
+			},
+			keyAppID: {
+				Type:         schema.TypeString,
+				Required:     true,
+				Description:  "Azure app registration application ID. Also known as the client ID.",
+				ValidateFunc: validation.IsUUID,
+			},
+			keyURL: {
+				Type:         schema.TypeString,
+				Required:     true,
+				Description:  "URL for customer provided private container registry.",
+				ValidateFunc: validation.StringIsNotWhiteSpace,
+			},
+		},
+	}
+}
+
+func azureCreatePrivateContainerRegistry(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] azureCreatePrivateContainerRegistry")
+
+	client, err := m.(*client).polaris()
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	id, err := uuid.Parse(d.Get(keyCloudAccountID).(string))
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	appID, err := uuid.Parse(d.Get(keyAppID).(string))
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	url := d.Get(keyURL).(string)
+	if err := pcr.Wrap(client).SetAzureRegistry(ctx, id, appID, url); err != nil {
+		return diag.FromErr(err)
+	}
+
+	d.SetId(id.String())
+	azureReadPrivateContainerRegistry(ctx, d, m)
+	return nil
+}
+
+func azureReadPrivateContainerRegistry(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] 
azureReadPrivateContainerRegistry")
+
+	client, err := m.(*client).polaris()
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	id, err := uuid.Parse(d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	pcrInfo, err := pcr.Wrap(client).AzureRegistry(ctx, id)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	if err := d.Set(keyAppID, pcrInfo.PCRDetails.ImagePullDetails.CustomerAppId); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set(keyURL, pcrInfo.PCRDetails.RegistryURL); err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}
+
+func azureUpdatePrivateContainerRegistry(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] azureUpdatePrivateContainerRegistry")
+
+	client, err := m.(*client).polaris()
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	id, err := uuid.Parse(d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	appID, err := uuid.Parse(d.Get(keyAppID).(string))
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	url := d.Get(keyURL).(string)
+	if err := pcr.Wrap(client).SetAzureRegistry(ctx, id, appID, url); err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}
+
+func azureDeletePrivateContainerRegistry(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Print("[TRACE] azureDeletePrivateContainerRegistry")
+
+	client, err := m.(*client).polaris()
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	id, err := uuid.Parse(d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	if err := pcr.Wrap(client).RemoveRegistry(ctx, id); err != nil {
+		return diag.FromErr(err)
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/internal/provider/resource_azure_subscription.go b/internal/provider/resource_azure_subscription.go
index 8794763..7fb017a 100644
--- a/internal/provider/resource_azure_subscription.go
+++ b/internal/provider/resource_azure_subscription.go
@@ -35,6 +35,7 @@ import (
 	"github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris"
"github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/azure" "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql" + gqlazure "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql/azure" "github.com/rubrikinc/rubrik-polaris-sdk-for-go/pkg/polaris/graphql/core" ) @@ -49,13 +50,15 @@ Any combination of different RSC features can be enabled for a subscription: for disaster recovery and long-term retention. 2. ´cloud_native_archival_encryption´ - Allows cloud archival locations to be encrypted with customer managed keys. - 3. ´cloud_native_protection´ - Provides protection for Azure virtual machines and + 3. ´cloud_native_blob_protection´ - Provides protection for Azure Blob Storage + through the rules and policies of SLA Domains. + 4. ´cloud_native_protection´ - Provides protection for Azure virtual machines and managed disks through the rules and policies of SLA Domains. - 4. ´exocompute´ - Provides snapshot indexing, file recovery, storage tiering, and + 5. ´exocompute´ - Provides snapshot indexing, file recovery, storage tiering, and application-consistent protection of Azure objects. - 5. ´sql_db_protection´ - Provides centralized database backup management and + 6. ´sql_db_protection´ - Provides centralized database backup management and recovery in an Azure SQL Database deployment. - 6. ´sql_mi_protection´ - Provides centralized database backup management and + 7. ´sql_mi_protection´ - Provides centralized database backup management and recovery for an Azure SQL Managed Instance deployment. 
Each feature's ´permissions´ field can be used with the ´polaris_azure_permissions´ @@ -95,6 +98,18 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "ENCRYPTION", "SQL_ARCHIVAL", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the Cloud Native Archival feature. " + + "Possible values are `BASIC`, `ENCRYPTION` and `SQL_ARCHIVAL`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -132,7 +147,7 @@ func resourceAzureSubscription() *schema.Resource { }, Description: "Region of the Azure resource group. Should be specified in the standard " + "Azure style, e.g. `eastus`. Changing this forces the RSC feature to be re-onboarded.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(gqlazure.AllRegionNames(), false), }, keyResourceGroupTags: { Type: schema.TypeMap, @@ -169,6 +184,18 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "ENCRYPTION", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the Cloud Native Archival Encryption " + + "feature. Possible values are `BASIC` and `ENCRYPTION`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -206,7 +233,7 @@ func resourceAzureSubscription() *schema.Resource { }, Description: "Region of the Azure resource group. Should be specified in the standard " + "Azure style, e.g. `eastus`. 
Changing this forces the RSC feature to be re-onboarded.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(gqlazure.AllRegionNames(), false), }, keyResourceGroupTags: { Type: schema.TypeMap, @@ -244,7 +271,7 @@ func resourceAzureSubscription() *schema.Resource { Required: true, Description: "User-assigned managed identity region. Should be specified in the " + "standard Azure style, e.g. `eastus`.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(gqlazure.AllRegionNames(), false), }, keyUserAssignedManagedIdentityResourceGroupName: { Type: schema.TypeString, @@ -265,6 +292,18 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "RECOVERY", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the Cloud Native Blob Protection feature. " + + "Possible values are `BASIC` and `RECOVERY`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -305,6 +344,20 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "EXPORT_AND_RESTORE", "FILE_LEVEL_RECOVERY", "CLOUD_CLUSTER_ES", + "SNAPSHOT_PRIVATE_ACCESS", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the Cloud Native Protection feature. 
" + + "Possible values are `BASIC`, `EXPORT_AND_RESTORE`, `FILE_LEVEL_RECOVERY`, " + + "`CLOUD_CLUSTER_ES` and `SNAPSHOT_PRIVATE_ACCESS`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -342,7 +395,7 @@ func resourceAzureSubscription() *schema.Resource { }, Description: "Region of the Azure resource group. Should be specified in the standard " + "Azure style, e.g. `eastus`. Changing this forces the RSC feature to be re-onboarded.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(gqlazure.AllRegionNames(), false), }, keyResourceGroupTags: { Type: schema.TypeMap, @@ -385,6 +438,18 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "PRIVATE_ENDPOINTS", "CUSTOMER_MANAGED_BASIC", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the Exocompute feature. Possible values " + + "are `BASIC`, `PRIVATE_ENDPOINTS` and `CUSTOMER_MANAGED_BASIC`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -422,7 +487,7 @@ func resourceAzureSubscription() *schema.Resource { }, Description: "Region of the Azure resource group. Should be specified in the standard " + "Azure style, e.g. `eastus`. 
Changing this forces the RSC feature to be re-onboarded.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(gqlazure.AllRegionNames(), false), }, keyResourceGroupTags: { Type: schema.TypeMap, @@ -459,6 +524,18 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "RECOVERY", "BACKUP_V2", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the SQL DB Protection feature. " + + "Possible values are `BASIC`, `RECOVERY` and `BACKUP_V2`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -499,6 +576,18 @@ func resourceAzureSubscription() *schema.Resource { Type: schema.TypeList, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + keyPermissionGroups: { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "BASIC", "RECOVERY", "BACKUP_V2", + }, false), + }, + Optional: true, + Description: "Permission groups to assign to the SQL MI Protection feature. 
" + + "Possible values are `BASIC`, `RECOVERY` and `BACKUP_V2`.", + }, keyPermissions: { Type: schema.TypeString, Optional: true, @@ -683,7 +772,7 @@ func azureUpdateSubscription(ctx context.Context, d *schema.ResourceData, m any) opAddFeature = iota opRemoveFeature opTemporaryRemoveFeature - opUpdateRegions + opUpdateSubscription opUpdatePermissions ) type updateOp struct { @@ -718,8 +807,10 @@ func azureUpdateSubscription(ctx context.Context, d *schema.ResourceData, m any) oldBlock := oldBlock.([]any)[0].(map[string]any) newBlock := newBlock.([]any)[0].(map[string]any) - switch { - case diffAzureFeatureResourceGroup(oldBlock, newBlock) || diffAzureUserAssignedManagedIdentity(oldBlock, newBlock): + // Changes in resource group or managed identity requires the + // feature to be re-onboarded, any other changes to the feature will + // be updated when the feature is re-onboarded. + if diffAzureFeatureResourceGroup(oldBlock, newBlock) || diffAzureUserAssignedManagedIdentity(oldBlock, newBlock) { updates = append(updates, updateOp{ op: opAddFeature, feature: feature.feature, @@ -731,15 +822,16 @@ func azureUpdateSubscription(ctx context.Context, d *schema.ResourceData, m any) feature: feature.feature, order: feature.orderSplitRemove, }) - - case diffAzureFeatureRegions(oldBlock, newBlock): + continue + } + if diffAzureFeaturePermissionGroups(oldBlock, newBlock) || diffAzureFeatureRegions(oldBlock, newBlock) { updates = append(updates, updateOp{ - op: opUpdateRegions, + op: opUpdateSubscription, feature: feature.feature, block: newBlock, }) - - case newBlock[keyPermissions] != oldBlock[keyPermissions]: + } + if diffAzureFeaturePermissions(newBlock, oldBlock) { updates = append(updates, updateOp{ op: opUpdatePermissions, feature: feature.feature, @@ -772,7 +864,10 @@ func azureUpdateSubscription(ctx context.Context, d *schema.ResourceData, m any) if err := azure.Wrap(client).RemoveSubscription(ctx, azure.CloudAccountID(accountID), feature, deleteSnapshots); err != nil { 
return diag.FromErr(err) } - case opUpdateRegions: + case opUpdateSubscription: + for _, permGroup := range update.block[keyPermissionGroups].(*schema.Set).List() { + feature = feature.WithPermissionGroups(core.PermissionGroup(permGroup.(string))) + } var opts []azure.OptionFunc for _, region := range update.block[keyRegions].(*schema.Set).List() { opts = append(opts, azure.Region(region.(string))) @@ -813,6 +908,7 @@ func azureDeleteSubscription(ctx context.Context, d *schema.ResourceData, m any) return diag.FromErr(err) } + // Remove features in the correct order. featureKeys := make([]featureKey, 0, len(azureKeyFeatureMap)) for key, feature := range azureKeyFeatureMap { featureKeys = append(featureKeys, featureKey{key: key, feature: feature.feature, order: feature.orderRemove}) @@ -827,7 +923,8 @@ func azureDeleteSubscription(ctx context.Context, d *schema.ResourceData, m any) } deleteSnapshots := d.Get(keyDeleteSnapshotsOnDestroy).(bool) - if err = azure.Wrap(client).RemoveSubscription(ctx, azure.CloudAccountID(accountID), featureKey.feature, deleteSnapshots); err != nil { + err = azure.Wrap(client).RemoveSubscription(ctx, azure.CloudAccountID(accountID), featureKey.feature, deleteSnapshots) + if err != nil && !errors.Is(err, graphql.ErrNotFound) { return diag.FromErr(err) } } @@ -942,6 +1039,12 @@ func addAzureFeature(ctx context.Context, d *schema.ResourceData, client *polari opts = append(opts, miOpt) } + if permGroups, ok := block[keyPermissionGroups]; ok { + for _, permGroup := range permGroups.(*schema.Set).List() { + feature = feature.WithPermissionGroups(core.PermissionGroup(permGroup.(string))) + } + } + return azure.Wrap(client).AddSubscription(ctx, azure.Subscription(id, d.Get(keyTenantDomain).(string)), feature, opts...) 
}
 
@@ -954,6 +1057,12 @@ func updateAzureFeatureState(d *schema.ResourceData, key string, feature azure.F
 		block = make(map[string]any)
 	}
 
+	permGroups := schema.Set{F: schema.HashString}
+	for _, permGroup := range feature.PermissionGroups {
+		permGroups.Add(string(permGroup))
+	}
+	block[keyPermissionGroups] = &permGroups
+
 	regions := schema.Set{F: schema.HashString}
 	for _, region := range feature.Regions {
 		regions.Add(region)
@@ -1055,6 +1164,37 @@ func diffAzureFeatureRegions(oldBlock, newBlock map[string]any) bool {
 	return !slices.Equal(oldRegions, newRegions)
 }
 
+// diffAzureFeaturePermissionGroups returns true if the old and new permission
+// groups blocks are different.
+func diffAzureFeaturePermissionGroups(oldBlock, newBlock map[string]any) bool {
+	var oldPermGroups []string
+	if v, ok := oldBlock[keyPermissionGroups]; ok {
+		for _, permGroup := range v.(*schema.Set).List() {
+			oldPermGroups = append(oldPermGroups, permGroup.(string))
+		}
+	}
+	var newPermGroups []string
+	if v, ok := newBlock[keyPermissionGroups]; ok {
+		for _, permGroup := range v.(*schema.Set).List() {
+			newPermGroups = append(newPermGroups, permGroup.(string))
+		}
+	}
+	slices.SortFunc(oldPermGroups, func(i, j string) int {
+		return cmp.Compare(i, j)
+	})
+	slices.SortFunc(newPermGroups, func(i, j string) int {
+		return cmp.Compare(i, j)
+	})
+
+	return !slices.Equal(oldPermGroups, newPermGroups)
+}
+
+// diffAzureFeaturePermissions returns true if the old and new permissions
+// strings are different.
+func diffAzureFeaturePermissions(oldBlock, newBlock map[string]any) bool {
+	return oldBlock[keyPermissions].(string) != newBlock[keyPermissions].(string)
+}
+
 // diffAzureFeatureResourceGroup returns true if the old and new resource group
 // blocks are different.
func diffAzureFeatureResourceGroup(oldBlock, newBlock map[string]any) bool { diff --git a/internal/provider/resource_data_center_archival_location_amazon_s3.go b/internal/provider/resource_data_center_archival_location_amazon_s3.go index f694614..c206f44 100644 --- a/internal/provider/resource_data_center_archival_location_amazon_s3.go +++ b/internal/provider/resource_data_center_archival_location_amazon_s3.go @@ -294,7 +294,7 @@ func resourceDataCenterArchivalLocationAmazonS3() *schema.Resource { Type: schema.TypeString, Required: true, Description: "AWS region.", - ValidateFunc: validation.StringIsNotWhiteSpace, + ValidateFunc: validation.StringInSlice(aws.AllRegionNames(), false), }, keyRetrievalTier: { Type: schema.TypeString,