From bca7a3b4a4f2ddad47a82f16ff7e61549366dc06 Mon Sep 17 00:00:00 2001 From: Richard Case Date: Fri, 29 Sep 2023 19:56:20 +0200 Subject: [PATCH] feat: add e2e that ensures v2prov doesn't break This change adds a new e2e test suite that checks that installing Rancher Turtles doesn't break v2prov. The test will provision an RKE2 cluster in Azure. It will then connect to the cluster via the kubeconfig. Signed-off-by: Richard Case --- Makefile | 2 +- test/e2e/config/operator.yaml | 3 + test/e2e/const.go | 11 ++ test/e2e/data/rancher/azure-cluster.yaml | 101 +++++++++++ test/e2e/data/rancher/azure-rke-config.yaml | 42 +++++ test/e2e/data/rancher/ingress.yaml | 2 +- .../data/rancher/rancher-service-patch.yaml | 2 +- test/e2e/flags.go | 61 +++++++ .../e2e/suites/import-gitops/import_gitops.go | 3 +- .../import-gitops/import_gitops_test.go | 8 +- test/e2e/suites/import-gitops/suite_test.go | 82 +++------ test/e2e/suites/v2prov/suite_test.go | 161 +++++++++++++++++ test/e2e/suites/v2prov/v2prov_test.go | 166 ++++++++++++++++++ test/framework/kube_helper.go | 19 +- test/framework/rancher_helpers.go | 64 ++++++- test/testenv/rancher.go | 40 +++-- 16 files changed, 679 insertions(+), 88 deletions(-) create mode 100644 test/e2e/data/rancher/azure-cluster.yaml create mode 100644 test/e2e/data/rancher/azure-rke-config.yaml create mode 100644 test/e2e/flags.go create mode 100644 test/e2e/suites/v2prov/suite_test.go create mode 100644 test/e2e/suites/v2prov/v2prov_test.go diff --git a/Makefile b/Makefile index 989122321..7cbc9ba35 100644 --- a/Makefile +++ b/Makefile @@ -485,7 +485,7 @@ e2e-image: ## Build the image for e2e tests .PHONY: compile-e2e e2e-compile: ## Test e2e compilation - go test -c -o /dev/null -tags=e2e ./test/e2e/suites/import-gitops + go test -c -o /dev/null -tags=e2e ./test/e2e/suites/... ## -------------------------------------- ## Documentation diff --git a/test/e2e/config/operator.yaml b/test/e2e/config/operator.yaml index bce27d4d2..71328b1d8 100644 --- a/test/e2e/config/operator.yaml +++ b/test/e2e/config/operator.yaml @@ -8,11 +8,13 @@ images: intervals: default/wait-controllers: ["3m", "10s"] default/wait-rancher: ["15m", "30s"] + default/wait-v2prov-create: ["25m", "30s"] default/wait-capa-create-cluster: ["30m", "30s"] default/wait-gitea: ["3m", "10s"] default/wait-consistently: ["30s", "5s"] default/wait-getservice: ["60s", "5s"] default/wait-eks-delete: ["20m", "30s"] + default/wait-azure-delete: ["20m", "30s"] variables: RANCHER_VERSION: "v2.7.6" @@ -20,6 +22,7 @@ variables: RANCHER_FEATURES: "embedded-cluster-api=false" RANCHER_PATH: "rancher-stable/rancher" KUBERNETES_VERSION: "v1.26.3" + RKE2_VERSION: "v1.26.8+rke2r1" CAPI_INFRASTRUCTURE: "docker:v1.4.6" CAPI_CORE: "cluster-api:v1.4.6" RANCHER_REPO_NAME: "rancher-stable" diff --git a/test/e2e/const.go b/test/e2e/const.go index ac7909a37..6e77bc27d 100644 --- a/test/e2e/const.go +++ b/test/e2e/const.go @@ -50,6 +50,12 @@ var ( //go:embed data/rancher/nginx-ingress.yaml NginxIngress []byte + + //go:embed data/rancher/azure-rke-config.yaml + V2ProvAzureRkeConfig []byte + + //go:embed data/rancher/azure-cluster.yaml + V2ProvAzureCluster []byte ) const ( @@ -83,7 +89,12 @@ const ( GiteaUserNameVar = "GITEA_USER_NAME" GiteaUserPasswordVar = "GITEA_USER_PWD" + RKE2VersionVar = "RKE2_VERSION" + CapaEncodedCredentialsVar = "CAPA_ENCODED_CREDS" + AzureSubIDVar = "AZURE_SUBSCRIPTION_ID" + AzureClientIDVar = "AZURE_CLIENT_ID" + AzureClientSecretVar = "AZURE_CLIENT_SECRET" AuthSecretName = "basic-auth-secret" diff --git 
a/test/e2e/data/rancher/azure-cluster.yaml b/test/e2e/data/rancher/azure-cluster.yaml new file mode 100644 index 000000000..fd5bf8730 --- /dev/null +++ b/test/e2e/data/rancher/azure-cluster.yaml @@ -0,0 +1,101 @@ +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +metadata: + annotations: + field.cattle.io/creatorId: ${USER} + name: ${CLUSTER_NAME} + namespace: fleet-default +spec: + cloudCredentialSecretName: ${CREDENTIAL_SECRET} + kubernetesVersion: ${KUBERNETES_VERSION} + localClusterAuthEndpoint: {} + rkeConfig: + chartValues: + rke2-calico: {} + etcd: + snapshotRetention: 5 + snapshotScheduleCron: 0 */5 * * * + machineGlobalConfig: + cni: calico + disable-kube-proxy: false + etcd-expose-metrics: false + machinePoolDefaults: {} + machinePools: + - controlPlaneRole: true + dynamicSchemaSpec: '{"resourceFields":{"acceleratedNetworking":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify + if an Accelerated Networking NIC should be created for your VM"},"availabilitySet":{"type":"string","default":{"stringValue":"docker-machine","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Availability Set to place the virtual machine into"},"availabilityZone":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify + the Availability Zones the Azure resources should be created in"},"clientId":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Service Principal Account ID (optional, browser auth is used if not specified)"},"clientSecret":{"type":"password","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Service Principal Account password (optional, browser auth is used if not + specified)"},"customData":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"File + contents for customData"},"diskSize":{"type":"string","default":{"stringValue":"30","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Disk + size if using managed disk"},"dns":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"A + unique DNS label for the public IP adddress"},"dockerPort":{"type":"string","default":{"stringValue":"2376","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Port + number for Docker engine"},"enablePublicIpStandardSku":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify + if a Standard SKU should be used for the Public IP of the Azure VM"},"environment":{"type":"string","default":{"stringValue":"AzurePublicCloud","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + environment (e.g. 
AzurePublicCloud, AzureChinaCloud)"},"faultDomainCount":{"type":"string","default":{"stringValue":"3","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Fault + domain count to use for availability set"},"image":{"type":"string","default":{"stringValue":"canonical:UbuntuServer:18.04-LTS:latest","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + virtual machine OS image"},"location":{"type":"string","default":{"stringValue":"westus","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + region to create the virtual machine"},"managedDisks":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Configures + VM and availability set for managed disks"},"noPublicIp":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Do + not create a public IP address for the machine"},"nsg":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Network Security Group to assign this node to (accepts either a name or resource + ID, default is to create a new NSG for each machine)"},"openPort":{"type":"array[string]","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"nullable":true,"create":true,"update":true,"description":"Make + the specified port number accessible from the Internet"},"plan":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Purchase + plan for Azure Virtual Machine (in \u003cpublisher\u003e:\u003cproduct\u003e:\u003cplan\u003e + format)"},"privateIpAddress":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Specify + a static private IP address for the machine"},"resourceGroup":{"type":"string","default":{"stringValue":"docker-machine","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Resource Group name (will be created if missing)"},"size":{"type":"string","default":{"stringValue":"Standard_D2_v2","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Size + for Azure Virtual Machine"},"sshUser":{"type":"string","default":{"stringValue":"docker-user","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Username + for SSH login"},"staticPublicIp":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Assign + a static public IP address to the machine"},"storageType":{"type":"string","default":{"stringValue":"Standard_LRS","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Type + of Storage Account to host the OS Disk for the machine"},"subnet":{"type":"string","default":{"stringValue":"docker-machine","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Subnet Name to be used within the Virtual 
Network"},"subnetPrefix":{"type":"string","default":{"stringValue":"192.168.0.0/16","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Private + CIDR block to be used for the new subnet, should comply RFC 1918"},"subscriptionId":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Subscription ID"},"tags":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Tags + to be applied to the Azure VM instance"},"tenantId":{"type":"string","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Tenant ID"},"updateDomainCount":{"type":"string","default":{"stringValue":"5","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Update + domain count to use for availability set"},"usePrivateIp":{"type":"boolean","default":{"stringValue":"","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Use + private IP address of the machine to connect"},"vnet":{"type":"string","default":{"stringValue":"docker-machine-vnet","intValue":0,"boolValue":false,"stringSliceValue":null},"create":true,"update":true,"description":"Azure + Virtual Network name to connect the virtual machine (in [resourcegroup:]name + format)"}}}' + etcdRole: true + machineConfigRef: + kind: AzureConfig + name: ${AZ_CONFIG_NAME} + name: pool1 + quantity: 1 + unhealthyNodeTimeout: 0s + workerRole: true + machineSelectorConfig: + - config: + protect-kernel-defaults: false + registries: {} + upgradeStrategy: + controlPlaneConcurrency: "1" + controlPlaneDrainOptions: + deleteEmptyDirData: true + disableEviction: false + enabled: false + force: false + gracePeriod: -1 + ignoreDaemonSets: true + ignoreErrors: false + postDrainHooks: null + preDrainHooks: null + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 120 + workerConcurrency: "1" + workerDrainOptions: + deleteEmptyDirData: true + disableEviction: false + enabled: false + force: false + gracePeriod: -1 + ignoreDaemonSets: true + ignoreErrors: false + postDrainHooks: null + preDrainHooks: null + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 120 diff --git a/test/e2e/data/rancher/azure-rke-config.yaml b/test/e2e/data/rancher/azure-rke-config.yaml new file mode 100644 index 000000000..6f1b71c7d --- /dev/null +++ b/test/e2e/data/rancher/azure-rke-config.yaml @@ -0,0 +1,42 @@ +apiVersion: rke-machine-config.cattle.io/v1 +kind: AzureConfig +metadata: + annotations: + field.cattle.io/creatorId: ${USER} + name: ${POOL_NAME} + namespace: fleet-default +acceleratedNetworking: false +availabilitySet: docker-machine +availabilityZone: "" +diskSize: "30" +dockerPort: "2376" +enablePublicIpStandardSku: false +environment: AzurePublicCloud +faultDomainCount: "3" +image: canonical:UbuntuServer:18.04-LTS:latest +location: westus +managedDisks: false +noPublicIp: false +nsg: rancher-managed-wpepXjvf +openPort: +- 6443/tcp +- 2379/tcp +- 2380/tcp +- 8472/udp +- 4789/udp +- 9796/tcp +- 10256/tcp +- 10250/tcp +- 10251/tcp +- 10252/tcp +plan: "" +resourceGroup: docker-machine +size: Standard_D2_v2 +sshUser: docker-user +staticPublicIp: false +storageType: Standard_LRS +subnet: docker-machine +subnetPrefix: 192.168.0.0/16 +updateDomainCount: "5" +usePrivateIp: false +vnet: docker-machine-vnet diff --git 
a/test/e2e/data/rancher/ingress.yaml b/test/e2e/data/rancher/ingress.yaml index 663792a6f..bcffdf011 100644 --- a/test/e2e/data/rancher/ingress.yaml +++ b/test/e2e/data/rancher/ingress.yaml @@ -14,4 +14,4 @@ spec: service: name: rancher port: - number: 443 + number: 80 diff --git a/test/e2e/data/rancher/rancher-service-patch.yaml b/test/e2e/data/rancher/rancher-service-patch.yaml index faf1fdb3a..07653ed96 100644 --- a/test/e2e/data/rancher/rancher-service-patch.yaml +++ b/test/e2e/data/rancher/rancher-service-patch.yaml @@ -2,6 +2,6 @@ apiVersion: v1 kind: Service metadata: annotations: - k8s.ngrok.com/app-protocols: '{"https-internal":"HTTPS","http":"HTTP"}' + k8s.ngrok.com/app-protocols: '{"http":"HTTP"}' name: rancher namespace: cattle-system diff --git a/test/e2e/flags.go b/test/e2e/flags.go new file mode 100644 index 000000000..5f466b4a1 --- /dev/null +++ b/test/e2e/flags.go @@ -0,0 +1,61 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2023 SUSE. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import "flag" + +type FlagValues struct { + // ConfigPath is the path to the e2e config file. + ConfigPath string + + // UseExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply). + UseExistingCluster bool + + // ArtifactFolder is the folder to store e2e test artifacts. + ArtifactFolder string + + // SkipCleanup prevents cleanup of test resources e.g. for debug purposes. + SkipCleanup bool + + // HelmBinaryPath is the path to the helm binary. + HelmBinaryPath string + + // ChartPath is the path to the operator chart. + ChartPath string + + // IsolatedMode instructs the test to run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD + // or other providers that run in the same network as the bootstrap cluster. + IsolatedMode bool + + // ClusterctlBinaryPath is the path to the clusterctl binary to use. + ClusterctlBinaryPath string +} + +// InitFlags is used to specify the standard flags for the e2e tests. 
+func InitFlags(values *FlagValues) { + flag.StringVar(&values.ConfigPath, "e2e.config", "config/operator.yaml", "path to the e2e config file") + flag.StringVar(&values.ArtifactFolder, "e2e.artifacts-folder", "_artifacts", "folder where e2e test artifacts should be stored") + flag.BoolVar(&values.SkipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") + flag.BoolVar(&values.UseExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") + flag.StringVar(&values.HelmBinaryPath, "e2e.helm-binary-path", "helm", "path to the helm binary") + flag.StringVar(&values.ClusterctlBinaryPath, "e2e.clusterctl-binary-path", "clusterctl", "path to the clusterctl binary") + flag.StringVar(&values.ChartPath, "e2e.chart-path", "", "path to the operator chart") + flag.BoolVar(&values.IsolatedMode, "e2e.isolated-mode", false, "if true, the test will run without ngrok and without exposing the cluster to the internet. This setup will only work with CAPD or other providers that run in the same network as the bootstrap cluster.") +} diff --git a/test/e2e/suites/import-gitops/import_gitops.go b/test/e2e/suites/import-gitops/import_gitops.go index fa7602585..f77569d80 100644 --- a/test/e2e/suites/import-gitops/import_gitops.go +++ b/test/e2e/suites/import-gitops/import_gitops.go @@ -21,6 +21,7 @@ package import_gitops import ( "context" + "fmt" "os" "path/filepath" "strconv" @@ -237,7 +238,7 @@ func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGi By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig") turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ Getter: input.BootstrapClusterProxy.GetClient(), - ClusterName: capiCluster.Name, + SecretName: fmt.Sprintf("%s-capi-kubeconfig", capiCluster.Name), Namespace: capiCluster.Namespace, RancherServerURL: input.RancherServerURL, WriteToTempFile: true, diff --git a/test/e2e/suites/import-gitops/import_gitops_test.go b/test/e2e/suites/import-gitops/import_gitops_test.go index 12863db65..6e107f519 100644 --- a/test/e2e/suites/import-gitops/import_gitops_test.go +++ b/test/e2e/suites/import-gitops/import_gitops_test.go @@ -39,8 +39,8 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: clusterctlConfigPath, - ClusterctlBinaryPath: clusterctlBinaryPath, - ArtifactFolder: artifactFolder, + ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, + ArtifactFolder: flagVals.ArtifactFolder, ClusterTemplatePath: "../../data/cluster-templates/docker-kubeadm.yaml", ClusterName: "cluster1", ControlPlaneMachineCount: ptr.To[int](1), @@ -69,8 +69,8 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: clusterctlConfigPath, - ClusterctlBinaryPath: clusterctlBinaryPath, - ArtifactFolder: artifactFolder, + ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, + ArtifactFolder: flagVals.ArtifactFolder, ClusterTemplatePath: "../../data/cluster-templates/aws-eks-mmp.yaml", ClusterName: "cluster2", ControlPlaneMachineCount: ptr.To[int](1), diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index 5792a494b..a213a6b88 100644 --- 
a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -21,7 +21,6 @@ package import_gitops import ( "context" - "flag" "fmt" "os" "path/filepath" @@ -41,30 +40,7 @@ import ( // Test suite flags. var ( - // configPath is the path to the e2e config file. - configPath string - - // useExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply). - useExistingCluster bool - - // artifactFolder is the folder to store e2e test artifacts. - artifactFolder string - - // skipCleanup prevents cleanup of test resources e.g. for debug purposes. - skipCleanup bool - - // helmBinaryPath is the path to the helm binary. - helmBinaryPath string - - // chartPath is the path to the operator chart. - chartPath string - - // isolatedMode instructs the test to run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD - // or other providers that run in the same network as the bootstrap cluster. - isolatedMode bool - - // clusterctlBinaryPath is the path to the clusterctl binary to use. - clusterctlBinaryPath string + flagVals *e2e.FlagValues ) // Test suite global vars. @@ -86,14 +62,8 @@ var ( ) func init() { - flag.StringVar(&configPath, "e2e.config", "config/operator.yaml", "path to the e2e config file") - flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "_artifacts", "folder where e2e test artifact should be stored") - flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") - flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") - flag.StringVar(&helmBinaryPath, "e2e.helm-binary-path", "helm", "path to the helm binary") - flag.StringVar(&clusterctlBinaryPath, "e2e.clusterctl-binary-path", "helm", "path to the clusterctl binary") - flag.StringVar(&chartPath, "e2e.chart-path", "", "path to the operator chart") - flag.BoolVar(&isolatedMode, "e2e.isolated-mode", false, "if true, the test will run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD or other providers that run in the same network as the bootstrap cluster.") + flagVals = &e2e.FlagValues{} + e2e.InitFlags(flagVals) } func TestE2E(t *testing.T) { @@ -101,43 +71,43 @@ func TestE2E(t *testing.T) { ctrl.SetLogger(klog.Background()) - RunSpecs(t, "rancher-turtles-e2e") + RunSpecs(t, "rancher-turtles-e2e-import-gitops") } var _ = BeforeSuite(func() { - Expect(configPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder) - Expect(helmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") - Expect(chartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + Expect(os.MkdirAll(flagVals.ArtifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) + Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. 
helm-binary-path should be an existing file.") + Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") - By(fmt.Sprintf("Loading the e2e test configuration from %q", configPath)) - e2eConfig = e2e.LoadE2EConfig(configPath) + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactFolder, "repository")) + By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluster, + UseExistingCluster: flagVals.UseExistingCluster, E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, Scheme: e2e.InitScheme(), - ArtifactFolder: artifactFolder, + ArtifactFolder: flagVals.ArtifactFolder, Hostname: hostName, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesVersionVar), - IsolatedMode: isolatedMode, - HelmBinaryPath: helmBinaryPath, + IsolatedMode: flagVals.IsolatedMode, + HelmBinaryPath: flagVals.HelmBinaryPath, }) - if isolatedMode { + if flagVals.IsolatedMode { hostName = setupClusterResult.IsolatedHostName } testenv.DeployRancherTurtles(ctx, testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: helmBinaryPath, - ChartPath: chartPath, + HelmBinaryPath: flagVals.HelmBinaryPath, + ChartPath: flagVals.ChartPath, CAPIProvidersSecretYAML: e2e.CapiProvidersSecret, CAPIProvidersYAML: e2e.CapiProviders, Namespace: turtlesframework.DefaultRancherTurtlesNamespace, @@ -170,8 +140,8 @@ var _ = BeforeSuite(func() { testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: helmBinaryPath, - IsolatedMode: isolatedMode, + HelmBinaryPath: flagVals.HelmBinaryPath, + IsolatedMode: flagVals.IsolatedMode, NginxIngress: e2e.NginxIngress, NginxIngressNamespace: e2e.NginxIngressNamespace, IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), @@ -185,7 +155,7 @@ var _ = BeforeSuite(func() { testenv.DeployRancher(ctx, testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: helmBinaryPath, + HelmBinaryPath: flagVals.HelmBinaryPath, RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), @@ -197,14 +167,14 @@ var _ = BeforeSuite(func() { RancherSettingsPatch: e2e.RancherSettingPatch, RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - IsolatedMode: isolatedMode, + IsolatedMode: flagVals.IsolatedMode, RancherIngressConfig: e2e.IngressConfig, RancherServicePatch: e2e.RancherServicePatch, }) giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ BootstrapClusterProxy: 
setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: helmBinaryPath, + HelmBinaryPath: flagVals.HelmBinaryPath, ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), @@ -225,7 +195,7 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactFolder, + SkipCleanup: flagVals.SkipCleanup, + ArtifactFolder: flagVals.ArtifactFolder, }) }) diff --git a/test/e2e/suites/v2prov/suite_test.go b/test/e2e/suites/v2prov/suite_test.go new file mode 100644 index 000000000..aed861389 --- /dev/null +++ b/test/e2e/suites/v2prov/suite_test.go @@ -0,0 +1,161 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2023 SUSE. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2prov + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" + "github.com/rancher-sandbox/rancher-turtles/test/testenv" +) + +// Test suite flags. +var ( + flagVals *e2e.FlagValues +) + +// Test suite global vars. +var ( + // e2eConfig to be used for this test, read from configPath. + e2eConfig *clusterctl.E2EConfig + + // clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository + // with the providers specified in the configPath. + clusterctlConfigPath string + + // hostName is the host name for the Rancher Manager server. + hostName string + + ctx = context.Background() + + setupClusterResult *testenv.SetupTestClusterResult +) + +func init() { + flagVals = &e2e.FlagValues{} + e2e.InitFlags(flagVals) +} + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + + ctrl.SetLogger(klog.Background()) + + RunSpecs(t, "rancher-turtles-e2e-v2prov") +} + +var _ = BeforeSuite(func() { + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + Expect(os.MkdirAll(flagVals.ArtifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) + Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") + Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. 
chart-path should be an existing file.") + + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + + By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + + hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: flagVals.UseExistingCluster, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: flagVals.ArtifactFolder, + Hostname: hostName, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesVersionVar), + IsolatedMode: flagVals.IsolatedMode, + HelmBinaryPath: flagVals.HelmBinaryPath, + }) + + if flagVals.IsolatedMode { + hostName = setupClusterResult.IsolatedHostName + } + + testenv.DeployRancherTurtles(ctx, testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + ChartPath: flagVals.ChartPath, + CAPIProvidersSecretYAML: e2e.CapiProvidersSecret, + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher-sandbox/rancher-turtles-amd64", + Tag: "v0.0.1", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + }) + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + IsolatedMode: flagVals.IsolatedMode, + NginxIngress: e2e.NginxIngress, + NginxIngressNamespace: e2e.NginxIngressNamespace, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + testenv.DeployRancher(ctx, testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + RancherChartRepoName: "rancher-latest", + RancherChartURL: "https://releases.rancher.com/server-charts/latest", + RancherChartPath: "rancher-latest/rancher", + //RancherVersion: "v2.7.7", + RancherImageTag: "v2.7-head", + Development: true, + RancherHost: hostName, + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherFeatures: e2eConfig.GetVariable(e2e.RancherFeaturesVar), + RancherSettingsPatch: e2e.RancherSettingPatch, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + IsolatedMode: flagVals.IsolatedMode, + RancherIngressConfig: e2e.IngressConfig, + RancherServicePatch: e2e.RancherServicePatch, + }) +}) + +var _ = AfterSuite(func() { + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: 
flagVals.SkipCleanup, + ArtifactFolder: flagVals.ArtifactFolder, + }) +}) diff --git a/test/e2e/suites/v2prov/v2prov_test.go b/test/e2e/suites/v2prov/v2prov_test.go new file mode 100644 index 000000000..59b69fa35 --- /dev/null +++ b/test/e2e/suites/v2prov/v2prov_test.go @@ -0,0 +1,166 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2023 SUSE. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2prov + +import ( + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/drone/envsubst/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" + + provisioningv1 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/provisioning/v1" + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" +) + +var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still work", Label(e2e.FullTestLabel), func() { + + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + It("Should create an RKE2 cluster in Azure", func() { + azSubId := e2eConfig.GetVariable(e2e.AzureSubIDVar) + Expect(azSubId).ToNot(BeEmpty(), "Azure Subscription ID is required") + azClientId := e2eConfig.GetVariable(e2e.AzureClientIDVar) + Expect(azClientId).ToNot(BeEmpty(), "Azure Client ID is required") + azClientSecret := e2eConfig.GetVariable(e2e.AzureClientSecretVar) + Expect(azClientSecret).ToNot(BeEmpty(), "Azure Client Secret is required") + + rke2Version := e2eConfig.GetVariable(e2e.RKE2VersionVar) + Expect(rke2Version).ToNot(BeEmpty(), "RKE2 version is required") + + credsSecretName := "cc-test99" + credsName := "az-ecm" + poolName := "az-test-pool" + clusterName := "az-cluster1" + + lookupResult := &turtlesframework.RancherLookupUserResult{} + turtlesframework.RancherLookupUser(ctx, turtlesframework.RancherLookupUserInput{ + Username: "admin", + ClusterProxy: setupClusterResult.BootstrapClusterProxy, + }, lookupResult) + + turtlesframework.CreateSecret(ctx, turtlesframework.CreateSecretInput{ + Creator: setupClusterResult.BootstrapClusterProxy.GetClient(), + Name: credsSecretName, + Namespace: "cattle-global-data", + Type: corev1.SecretTypeOpaque, + Data: map[string]string{ + "azurecredentialConfig-clientId": azClientId, + "azurecredentialConfig-clientSecret": azClientSecret, + "azurecredentialConfig-environment": "AzurePublicCloud", + "azurecredentialConfig-subscriptionId": azSubId, + "azurecredentialConfig-tenantId": "", + }, + Annotations: map[string]string{ + "field.cattle.io/name": credsName, + "provisioning.cattle.io/driver": "azure", + "field.cattle.io/creatorId": lookupResult.User, + }, + Labels: map[string]string{ + "cattle.io/creator": "norman", + }, + }) + + rkeConfig, err := envsubst.Eval(string(e2e.V2ProvAzureRkeConfig), func(s string) string { + switch s { + case "POOL_NAME": + return poolName + case "USER": + return lookupResult.User + default: + return 
os.Getenv(s) + } + }) + Expect(err).ToNot(HaveOccurred()) + Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed to apply Azure RKE config") + + cluster, err := envsubst.Eval(string(e2e.V2ProvAzureCluster), func(s string) string { + switch s { + case "CLUSTER_NAME": + return clusterName + case "USER": + return lookupResult.User + case "CREDENTIAL_SECRET": + return fmt.Sprintf("cattle-global-data:%s", credsSecretName) + case "KUBERNETES_VERSION": + return rke2Version + case "AZ_CONFIG_NAME": + return poolName + default: + return os.Getenv(s) + } + }) + Expect(err).ToNot(HaveOccurred()) + Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed to apply Azure cluster config") + + By("Waiting for the rancher cluster record to appear") + rancherCluster := &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{ + Namespace: "fleet-default", + Name: clusterName, + }} + Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + + By("Waiting for the rancher cluster to have a deployed agent") + Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue())) + + By("Waiting for the rancher cluster to be ready") + Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue())) + + By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig") + rancherKubeconfig := &turtlesframework.RancherGetClusterKubeconfigResult{} + turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ + Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + SecretName: fmt.Sprintf("%s-kubeconfig", rancherCluster.Name), + Namespace: rancherCluster.Namespace, + RancherServerURL: hostName, + WriteToTempFile: true, + }, rancherKubeconfig) + + rancherConnectRes := &turtlesframework.RunCommandResult{} + turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{ + Command: "kubectl", + Args: []string{ + "--kubeconfig", + rancherKubeconfig.TempFilePath, + "get", + "nodes", + "--insecure-skip-tls-verify", + }, + }, rancherConnectRes) + Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig") + Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes returned a non-zero exit code") + + By("Deleting cluster from Rancher") + err = setupClusterResult.BootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster) + Expect(err).NotTo(HaveOccurred(), "Failed to delete rancher cluster") + + By("Waiting for the rancher cluster record to be removed") + Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") + }) +}) diff --git a/test/framework/kube_helper.go b/test/framework/kube_helper.go index 03699abdd..d824ddbb4 100644 --- a/test/framework/kube_helper.go +++ b/test/framework/kube_helper.go @@ -105,11 +105,13 @@ func GetServicePortByName(ctx context.Context, input GetServicePortByNameInput, // CreateSecretInput is the input to CreateSecret. 
type CreateSecretInput struct { - Creator framework.Creator - Name string - Namespace string - Type corev1.SecretType - Data map[string]string + Creator framework.Creator + Name string + Namespace string + Type corev1.SecretType + Data map[string]string + Labels map[string]string + Annotations map[string]string } // CreateSecret will create a new Kubernetes secret. @@ -131,6 +133,13 @@ func CreateSecret(ctx context.Context, input CreateSecretInput) { Type: input.Type, } + if len(input.Annotations) > 0 { + secret.Annotations = input.Annotations + } + if len(input.Labels) > 0 { + secret.Labels = input.Labels + } + Eventually(func() error { return input.Creator.Create(ctx, secret) }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create secret %s", klog.KObj(secret)) diff --git a/test/framework/rancher_helpers.go b/test/framework/rancher_helpers.go index 0fe3e4bbb..4eac528f3 100644 --- a/test/framework/rancher_helpers.go +++ b/test/framework/rancher_helpers.go @@ -18,7 +18,6 @@ package framework import ( "context" - "fmt" "net/url" "os" @@ -26,6 +25,8 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/cluster-api/test/framework" @@ -34,7 +35,7 @@ import ( // RancherGetClusterKubeconfigInput is the input to RancherGetClusterKubeconfig. type RancherGetClusterKubeconfigInput struct { Getter framework.Getter - ClusterName string + SecretName string Namespace string RancherServerURL string WriteToTempFile bool @@ -50,7 +51,7 @@ type RancherGetClusterKubeconfigResult struct { func RancherGetClusterKubeconfig(ctx context.Context, input RancherGetClusterKubeconfigInput, result *RancherGetClusterKubeconfigResult) { Expect(ctx).NotTo(BeNil(), "ctx is required for RancherGetClusterKubeconfig") Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling RancherGetClusterKubeconfig") - Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be nil when calling RancherGetClusterKubeconfig") + Expect(input.SecretName).ToNot(BeEmpty(), "Invalid argument. input.SecretName can't be nil when calling RancherGetClusterKubeconfig") Expect(input.RancherServerURL).ToNot(BeEmpty(), "Invalid argument. 
input.RancherServerURL can't be nil when calling RancherGetClusterKubeconfig") if input.Namespace == "" { @@ -58,13 +59,10 @@ func RancherGetClusterKubeconfig(ctx context.Context, input RancherGetClusterKub } By("Getting Rancher kubeconfig secret") - - kubeConfigSecretName := fmt.Sprintf("%s-capi-kubeconfig", input.ClusterName) - secret := &corev1.Secret{} - err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: kubeConfigSecretName}, secret) - Expect(err).ShouldNot(HaveOccurred(), "Getting Rancher kubeconfig secret %s", kubeConfigSecretName) + err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: input.SecretName}, secret) + Expect(err).ShouldNot(HaveOccurred(), "Getting Rancher kubeconfig secret %s", input.SecretName) content, ok := secret.Data["value"] Expect(ok).To(BeTrue(), "Failed to find expected key in kubeconfig secret") @@ -104,3 +102,53 @@ func RancherGetClusterKubeconfig(ctx context.Context, input RancherGetClusterKub result.TempFilePath = tempFile.Name() } + +type RancherLookupUserInput struct { + ClusterProxy framework.ClusterProxy + Username string +} + +type RancherLookupUserResult struct { + User string +} + +func RancherLookupUser(ctx context.Context, input RancherLookupUserInput, result *RancherLookupUserResult) { + Expect(ctx).NotTo(BeNil(), "ctx is required for RancherLookupUser") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling RancherLookupUser") + Expect(input.Username).ToNot(BeEmpty(), "Invalid argument. input.Username can't be nil when calling RancherLookupUser") + + gvkUser := schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "User"} + + usersList := &unstructured.Unstructured{} + usersList.SetGroupVersionKind(gvkUser) + err := input.ClusterProxy.GetClient().List(ctx, usersList) + Expect(err).NotTo(HaveOccurred(), "Failed to list users") + + field, ok := usersList.Object["items"] + Expect(ok).To(BeTrue(), "Returned content is not a list") + + items, ok := field.([]interface{}) + Expect(ok).To(BeTrue(), "Returned content is not a list") + foundUser := "" + for _, item := range items { + child, ok := item.(map[string]interface{}) + Expect(ok).To(BeTrue(), "items member is not an object") + + username, ok := child["username"].(string) + if !ok { + continue + } + + if username != input.Username { + continue + } + + obj := &unstructured.Unstructured{Object: child} + foundUser = obj.GetName() + break + } + + Expect(foundUser).ToNot(BeEmpty(), "Failed to find user for %s", input.Username) + + result.User = foundUser +} diff --git a/test/testenv/rancher.go b/test/testenv/rancher.go index 32bc92220..d0655886e 100644 --- a/test/testenv/rancher.go +++ b/test/testenv/rancher.go @@ -40,6 +40,7 @@ type DeployRancherInput struct { RancherChartURL string RancherChartPath string RancherVersion string + RancherImageTag string RancherNamespace string RancherHost string RancherPassword string @@ -50,6 +51,7 @@ type DeployRancherInput struct { IsolatedMode bool RancherIngressConfig []byte RancherServicePatch []byte + Development bool } func DeployRancher(ctx context.Context, input DeployRancherInput) { @@ -60,13 +62,19 @@ func DeployRancher(ctx context.Context, input DeployRancherInput) { Expect(input.RancherChartRepoName).ToNot(BeEmpty(), "RancherChartRepoName is required for DeployRancher") Expect(input.RancherChartURL).ToNot(BeEmpty(), "RancherChartURL is required for DeployRancher") Expect(input.RancherChartPath).ToNot(BeEmpty(), 
"RancherChartPath is required for DeployRancher") - Expect(input.RancherVersion).ToNot(BeEmpty(), "RancherVersion is required for DeployRancher") Expect(input.RancherNamespace).ToNot(BeEmpty(), "RancherNamespace is required for DeployRancher") Expect(input.RancherHost).ToNot(BeEmpty(), "RancherHost is required for DeployRancher") Expect(input.RancherPassword).ToNot(BeEmpty(), "RancherPassword is required for DeployRancher") Expect(input.RancherWaitInterval).ToNot(BeNil(), "RancherWaitInterval is required for DeployRancher") Expect(input.ControllerWaitInterval).ToNot(BeNil(), "ControllerWaitInterval is required for DeployRancher") + if input.RancherVersion == "" && input.RancherImageTag == "" { + Fail("RancherVersion or RancherImageTag is required") + } + if input.RancherVersion != "" && input.RancherImageTag != "" { + Fail("Only one of RancherVersion or RancherImageTag cen be used") + } + By("Adding Rancher chart repo") addChart := &opframework.HelmChart{ BinaryPath: input.HelmBinaryPath, @@ -88,17 +96,24 @@ func DeployRancher(ctx context.Context, input DeployRancherInput) { Expect(err).ToNot(HaveOccurred()) By("Installing Rancher") + installFlags := opframework.Flags( + "--namespace", input.RancherNamespace, + "--create-namespace", + "--wait", + ) + if input.RancherVersion != "" { + installFlags = append(installFlags, "--version", input.RancherVersion) + } + if input.Development { + installFlags = append(installFlags, "--devel") + } + chart := &opframework.HelmChart{ - BinaryPath: input.HelmBinaryPath, - Path: input.RancherChartPath, - Name: "rancher", - Kubeconfig: input.BootstrapClusterProxy.GetKubeconfigPath(), - AdditionalFlags: opframework.Flags( - "--version", input.RancherVersion, - "--namespace", input.RancherNamespace, - "--create-namespace", - "--wait", - ), + BinaryPath: input.HelmBinaryPath, + Path: input.RancherChartPath, + Name: "rancher", + Kubeconfig: input.BootstrapClusterProxy.GetKubeconfigPath(), + AdditionalFlags: installFlags, } values := map[string]string{ "bootstrapPassword": input.RancherPassword, @@ -109,6 +124,9 @@ func DeployRancher(ctx context.Context, input DeployRancherInput) { if input.RancherFeatures != "" { values["features"] = input.RancherFeatures } + if input.RancherImageTag != "" { + values["rancherImageTag"] = input.RancherImageTag + } _, err = chart.Run(values) Expect(err).ToNot(HaveOccurred())