From 760accbdd392a958ec5bb5b73b92db34da2ea6b3 Mon Sep 17 00:00:00 2001
From: Danil-Grigorev
Date: Thu, 11 Jul 2024 18:20:14 +0200
Subject: [PATCH] Merge: Revert tests to 256cfda

Signed-off-by: Danil-Grigorev
---
 .../e2e/specs/migrate_gitops_provv1_mgmtv3.go | 507 +++++------
 .../suites/migrate-gitops-v1-v3/suite_test.go | 248 ---------
 .../migrate_gitops_mgmtv3_ provv1_test.go     |  67 ---
 .../migrate_gitops_provv1_mgmtv3_test.go      |   8 +-
 .../suite_test.go                             |  18 +-
 5 files changed, 144 insertions(+), 704 deletions(-)
 delete mode 100644 test/e2e/suites/migrate-gitops-v1-v3/suite_test.go
 delete mode 100644 test/e2e/suites/migrate-gitops-v3-v1/migrate_gitops_mgmtv3_ provv1_test.go
 rename test/e2e/suites/{migrate-gitops-v1-v3 => migrate-gitops}/migrate_gitops_provv1_mgmtv3_test.go (90%)
 rename test/e2e/suites/{migrate-gitops-v3-v1 => migrate-gitops}/suite_test.go (95%)

diff --git a/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go b/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go
index 10673219..069ccb22 100644
--- a/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go
+++ b/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go
@@ -50,7 +50,7 @@ import (
 	turtlesnaming "github.com/rancher/turtles/util/naming"
 )
 
-type MigrateClusterUsingGitOpsSpecInput struct {
+type MigrateToV3UsingGitOpsSpecInput struct {
 	E2EConfig             *clusterctl.E2EConfig
 	BootstrapClusterProxy framework.ClusterProxy
 	ClusterctlConfigPath  string
@@ -92,132 +92,114 @@ type MigrateClusterUsingGitOpsSpecInput struct {
 	OwnedLabelName string
 }
 
-type ValidateLegacyRancherClusterInput struct {
-	Namespace            *corev1.Namespace
-	CAPICluster          *clusterv1.Cluster
-	RancherKubeconfig    *turtlesframework.RancherGetClusterKubeconfigResult
-	RancherLegacyCluster *provisioningv1.Cluster
-	*MigrateClusterUsingGitOpsSpecInput
-}
-
-func ValidateLegacyRancherCluster(ctx context.Context, input *ValidateLegacyRancherClusterInput) {
-	By("Waiting for the rancher cluster record to appear")
-	input.RancherLegacyCluster = &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{
-		Namespace: input.Namespace.Name,
-		Name:      turtlesnaming.Name(input.CAPICluster.Name).ToRancherName(),
-	}}
-	Eventually(komega.Get(input.RancherLegacyCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
-
-	By("Waiting for the rancher cluster to have a deployed agent")
-	Eventually(komega.Object(input.RancherLegacyCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.AgentDeployed", BeTrue()))
-
-	By("Waiting for the rancher cluster to be ready")
-	Eventually(komega.Object(input.RancherLegacyCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue()))
-
-	By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig")
-	turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
-		Getter:           input.BootstrapClusterProxy.GetClient(),
-		SecretName:       fmt.Sprintf("%s-capi-kubeconfig", input.CAPICluster.Name),
-		Namespace:        input.CAPICluster.Namespace,
-		RancherServerURL: input.RancherServerURL,
-		WriteToTempFile:  true,
-	}, input.RancherKubeconfig)
-
-	rancherConnectRes := &turtlesframework.RunCommandResult{}
-	turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
-		Command: "kubectl",
-		Args: []string{
-			"--kubeconfig",
-			input.RancherKubeconfig.TempFilePath,
-			"get",
-			"nodes",
-			"--insecure-skip-tls-verify",
-		},
-	}, rancherConnectRes)
-	Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
-	Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")
-}
-
-type ValidateRancherClusterInput struct {
-	Namespace         *corev1.Namespace
-	CAPICluster       *clusterv1.Cluster
-	RancherKubeconfig *turtlesframework.RancherGetClusterKubeconfigResult
-	RancherCluster    *managementv3.Cluster
-	*MigrateClusterUsingGitOpsSpecInput
-}
-
-func ValidateRancherCluster(ctx context.Context, input *ValidateRancherClusterInput) {
-	By("Waiting for the rancher cluster record to appear")
-	rancherClusters := &managementv3.ClusterList{}
-	selectors := []client.ListOption{
-		client.MatchingLabels{
-			input.CapiClusterOwnerLabel:          input.CAPICluster.Name,
-			input.CapiClusterOwnerNamespaceLabel: input.CAPICluster.Namespace,
-			input.OwnedLabelName:                 "",
-		},
-	}
-	Eventually(func() bool {
-		Eventually(komega.List(rancherClusters, selectors...)).Should(Succeed())
-		return len(rancherClusters.Items) == 1
-	}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())
-	input.RancherCluster = &rancherClusters.Items[0]
-	Eventually(komega.Get(input.RancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
-
-	By("Waiting for the rancher cluster to have a deployed agent")
-	Eventually(func() bool {
-		Eventually(komega.Get(input.RancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
-		return conditions.IsTrue(input.RancherCluster, managementv3.ClusterConditionAgentDeployed)
-	}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())
-
-	By("Waiting for the rancher cluster to be ready")
-	Eventually(func() bool {
-		Eventually(komega.Get(input.RancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
-		return conditions.IsTrue(input.RancherCluster, managementv3.ClusterConditionReady)
-	}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())
-
-	By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig")
-	turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
-		Getter:           input.BootstrapClusterProxy.GetClient(),
-		SecretName:       fmt.Sprintf("%s-kubeconfig", input.RancherCluster.Name),
-		Namespace:        input.RancherCluster.Spec.FleetWorkspaceName,
-		RancherServerURL: input.RancherServerURL,
-		WriteToTempFile:  true,
-	}, input.RancherKubeconfig)
-
-	rancherConnectRes := &turtlesframework.RunCommandResult{}
-	turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
-		Command: "kubectl",
-		Args: []string{
-			"--kubeconfig",
-			input.RancherKubeconfig.TempFilePath,
-			"get",
-			"nodes",
-			"--insecure-skip-tls-verify",
-		},
-	}, rancherConnectRes)
-	Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
-	Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")
-}
-
 // MigrateToV3UsingGitOpsSpec implements a spec that will create a cluster via Fleet and test that it
 // automatically imports into Rancher Manager.
-func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateClusterUsingGitOpsSpecInput) {
+func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateToV3UsingGitOpsSpecInput) {
 	var (
-		specName               = "migrategitopsv1tov3"
-		input                  MigrateClusterUsingGitOpsSpecInput
-		namespace              *corev1.Namespace
-		repoName               string
-		cancelWatches          context.CancelFunc
-		capiCluster            *types.NamespacedName
-		rancherKubeconfig      *turtlesframework.RancherGetClusterKubeconfigResult
-		originalKubeconfig     *turtlesframework.RancherGetClusterKubeconfigResult
-		rancherCluster         *managementv3.Cluster
-		rancherLegacyCluster   *provisioningv1.Cluster
-		skipCapiClusterCleanup bool
-		capiClusterCreateWait  []interface{}
-		deleteClusterWait      []interface{}
+		specName             = "migrategitops"
+		input                MigrateToV3UsingGitOpsSpecInput
+		namespace            *corev1.Namespace
+		repoName             string
+		cancelWatches        context.CancelFunc
+		capiCluster          *types.NamespacedName
+		rancherKubeconfig    *turtlesframework.RancherGetClusterKubeconfigResult
+		originalKubeconfig   *turtlesframework.RancherGetClusterKubeconfigResult
+		rancherConnectRes    *turtlesframework.RunCommandResult
+		rancherCluster       *managementv3.Cluster
+		rancherLegacyCluster *provisioningv1.Cluster
+		capiClusterCreateWait []interface{}
+		deleteClusterWait     []interface{}
 	)
 
+	validateLegacyRancherCluster := func() {
+		By("Waiting for the rancher cluster record to appear")
+		rancherLegacyCluster = &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace.Name,
+			Name:      turtlesnaming.Name(capiCluster.Name).ToRancherName(),
+		}}
+		Eventually(komega.Get(rancherLegacyCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+
+		By("Waiting for the rancher cluster to have a deployed agent")
+		Eventually(komega.Object(rancherLegacyCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.AgentDeployed", BeTrue()))
+
+		By("Waiting for the rancher cluster to be ready")
+		Eventually(komega.Object(rancherLegacyCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue()))
+
+		By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig")
+		turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
+			Getter:           input.BootstrapClusterProxy.GetClient(),
+			SecretName:       fmt.Sprintf("%s-capi-kubeconfig", capiCluster.Name),
+			Namespace:        capiCluster.Namespace,
+			RancherServerURL: input.RancherServerURL,
+			WriteToTempFile:  true,
+		}, rancherKubeconfig)
+
+		turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
+			Command: "kubectl",
+			Args: []string{
+				"--kubeconfig",
+				rancherKubeconfig.TempFilePath,
+				"get",
+				"nodes",
+				"--insecure-skip-tls-verify",
+			},
+		}, rancherConnectRes)
+		Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
+		Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")
+	}
+
+	validateRancherCluster := func() {
+		By("Waiting for the rancher cluster record to appear")
+		rancherClusters := &managementv3.ClusterList{}
+		selectors := []client.ListOption{
+			client.MatchingLabels{
+				input.CapiClusterOwnerLabel:          capiCluster.Name,
+				input.CapiClusterOwnerNamespaceLabel: capiCluster.Namespace,
+				input.OwnedLabelName:                 "",
+			},
+		}
+		Eventually(func() bool {
+			Eventually(komega.List(rancherClusters, selectors...)).Should(Succeed())
+			return len(rancherClusters.Items) == 1
+		}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())
+		rancherCluster = &rancherClusters.Items[0]
+		Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+
+		By("Waiting for the rancher cluster to have a deployed agent")
+		Eventually(func() bool {
+			Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+			return conditions.IsTrue(rancherCluster, managementv3.ClusterConditionAgentDeployed)
+		}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())
+
+		By("Waiting for the rancher cluster to be ready")
+		Eventually(func() bool {
+			Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+			return conditions.IsTrue(rancherCluster, managementv3.ClusterConditionReady)
+		}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())
+
+		By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig")
+		turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
+			Getter:           input.BootstrapClusterProxy.GetClient(),
+			SecretName:       fmt.Sprintf("%s-kubeconfig", rancherCluster.Name),
+			Namespace:        rancherCluster.Spec.FleetWorkspaceName,
+			RancherServerURL: input.RancherServerURL,
+			WriteToTempFile:  true,
+		}, rancherKubeconfig)
+
+		turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
+			Command: "kubectl",
+			Args: []string{
+				"--kubeconfig",
+				rancherKubeconfig.TempFilePath,
+				"get",
+				"nodes",
+				"--insecure-skip-tls-verify",
+			},
+		}, rancherConnectRes)
+		Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
+		Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")
+	}
+
 	BeforeEach(func() {
 		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
 		input = inputGetter()
@@ -243,14 +225,13 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateC
 
 		rancherKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
 		originalKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
-
-		skipCapiClusterCleanup = false
+		rancherConnectRes = new(turtlesframework.RunCommandResult)
 
 		komega.SetClient(input.BootstrapClusterProxy.GetClient())
 		komega.SetContext(ctx)
 	})
 
-	It("Should automatically migrate provisioning v1 cluster to management v3", func() {
+	It("Should import a cluster using gitops", func() {
 		controlPlaneMachineCount := 1
 		if input.ControlPlaneMachineCount != nil {
 			controlPlaneMachineCount = *input.ControlPlaneMachineCount
@@ -336,21 +317,21 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateC
 		})
 
 		By("Waiting for the CAPI cluster to appear")
-		cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{
+		capiCluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace.Name,
 			Name:      input.ClusterName,
 		}}
 		Eventually(
-			komega.Get(cluster),
+			komega.Get(capiCluster),
 			input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).
 			Should(Succeed(), "Failed to apply CAPI cluster definition to cluster via Fleet")
 
 		By("Waiting for cluster control plane to be Ready")
-		Eventually(komega.Object(cluster), capiClusterCreateWait...).Should(HaveField("Status.ControlPlaneReady", BeTrue()))
+		Eventually(komega.Object(capiCluster), capiClusterCreateWait...).Should(HaveField("Status.ControlPlaneReady", BeTrue()))
 
 		By("Waiting for the CAPI cluster to be connectable")
 		Eventually(func() error {
-			remoteClient := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name).GetClient()
+			remoteClient := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, capiCluster.Namespace, capiCluster.Name).GetClient()
 			namespaces := &corev1.NamespaceList{}
 
 			return remoteClient.List(ctx, namespaces)
@@ -359,19 +340,13 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateC
 		By("Storing the original CAPI cluster kubeconfig")
 		turtlesframework.RancherGetOriginalKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
 			Getter:          input.BootstrapClusterProxy.GetClient(),
-			SecretName:      fmt.Sprintf("%s-kubeconfig", cluster.Name),
-			Namespace:       cluster.Namespace,
+			SecretName:      fmt.Sprintf("%s-kubeconfig", capiCluster.Name),
+			Namespace:       capiCluster.Namespace,
 			WriteToTempFile: true,
 		}, originalKubeconfig)
 
 		By("Running checks on Rancher cluster")
-		ValidateLegacyRancherCluster(ctx, &ValidateLegacyRancherClusterInput{
-			Namespace:                          namespace,
-			CAPICluster:                        cluster,
-			RancherKubeconfig:                  rancherKubeconfig,
-			RancherLegacyCluster:               rancherLegacyCluster,
-			MigrateClusterUsingGitOpsSpecInput: &input,
-		})
+		validateLegacyRancherCluster()
 
 		testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
 			HelmBinaryPath:        input.HelmBinaryPath,
@@ -387,6 +362,7 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateC
 			Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
 			Tag:                          "v0.0.1",
 			WaitDeploymentsReadyInterval: input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers"),
+			SkipCleanup:                  true,
 			AdditionalValues:             map[string]string{},
 		}
 
@@ -397,7 +373,6 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateC
 		upgradeInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "true"
 		upgradeInput.AdditionalValues["rancherTurtles.features.managementv3-cluster-migration.enabled"] = "true"
 
-		By("Upgrade turtles to management v3 cluster")
 		testenv.UpgradeRancherTurtles(ctx, upgradeInput)
 
 		By("Waiting for the Rancher cluster record to be removed")
@@ -405,255 +380,33 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateC
 
 		By("CAPI cluster should NOT have the 'imported' annotation")
 		Consistently(func() bool {
-			Eventually(komega.Get(cluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
-			annotations := cluster.GetAnnotations()
+			Eventually(komega.Get(capiCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+			annotations := capiCluster.GetAnnotations()
 
 			_, found := annotations["imported"]
 			return !found
 		}, 5*time.Second).Should(BeTrue(), "'imported' annotation is NOT expected on CAPI cluster")
 
 		By("Rancher should be available using new cluster import")
-		ValidateRancherCluster(ctx, &ValidateRancherClusterInput{
-			Namespace:                          namespace,
-			CAPICluster:                        cluster,
-			RancherKubeconfig:                  rancherKubeconfig,
-			RancherCluster:                     rancherCluster,
-			MigrateClusterUsingGitOpsSpecInput: &input,
-		})
-	})
-
-	AfterEach(func() {
-		err := testenv.CollectArtifacts(ctx, input.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(input.ArtifactFolder, input.BootstrapClusterProxy.GetName(), input.ClusterName+"bootstrap"+specName))
-		if err != nil {
-			fmt.Printf("Failed to collect artifacts for the bootstrap cluster: %v\n", err)
-		}
-
-		err = testenv.CollectArtifacts(ctx, originalKubeconfig.TempFilePath, path.Join(input.ArtifactFolder, input.BootstrapClusterProxy.GetName(), input.ClusterName+specName))
-		if err != nil {
-			fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err)
-		}
-
-		e2e.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, capiCluster, input.E2EConfig.GetIntervals, skipCapiClusterCleanup)
-	})
-}
-
-func DowngradeToV1UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateClusterUsingGitOpsSpecInput) {
-	var (
-		specName               = "migrategitopsv3tov1"
-		input                  MigrateClusterUsingGitOpsSpecInput
-		namespace              *corev1.Namespace
-		repoName               string
-		cancelWatches          context.CancelFunc
-		capiCluster            *types.NamespacedName
-		rancherKubeconfig      *turtlesframework.RancherGetClusterKubeconfigResult
-		originalKubeconfig     *turtlesframework.RancherGetClusterKubeconfigResult
-		rancherCluster         *managementv3.Cluster
-		rancherLegacyCluster   *provisioningv1.Cluster
-		skipCapiClusterCleanup bool
-		capiClusterCreateWait  []interface{}
-		deleteClusterWait      []interface{}
-	)
-
-	BeforeEach(func() {
-		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
-		input = inputGetter()
-		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
-		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
-		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
-		Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
-
-		Expect(input.E2EConfig.Variables).To(HaveKey(e2e.KubernetesManagementVersionVar))
-		namespace, cancelWatches = e2e.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
-		repoName = e2e.CreateRepoName(specName)
-
-		capiClusterCreateWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.CAPIClusterCreateWaitName)
-		Expect(capiClusterCreateWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.CAPIClusterCreateWaitName)
-
-		deleteClusterWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.DeleteClusterWaitName)
-		Expect(capiClusterCreateWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.CAPIClusterCreateWaitName)
-
-		capiCluster = &types.NamespacedName{
-			Namespace: namespace.Name,
-			Name:      input.ClusterName,
-		}
-
-		rancherKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
-		originalKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
-
-		skipCapiClusterCleanup = false
-
-		komega.SetClient(input.BootstrapClusterProxy.GetClient())
-		komega.SetContext(ctx)
-	})
-
-	It("Should automatically downgrade management v3 cluster to provisioning v1", func() {
-		controlPlaneMachineCount := 1
-		if input.ControlPlaneMachineCount != nil {
-			controlPlaneMachineCount = *input.ControlPlaneMachineCount
-		}
-
-		workerMachineCount := 1
-		if input.WorkerMachineCount != nil {
-			workerMachineCount = *input.WorkerMachineCount
-		}
-
-		if input.LabelNamespace {
-			turtlesframework.AddLabelsToNamespace(ctx, turtlesframework.AddLabelsToNamespaceInput{
-				ClusterProxy: input.BootstrapClusterProxy,
-				Name:         namespace.Name,
-				Labels: map[string]string{
-					"cluster-api.cattle.io/rancher-auto-import": "true",
-				},
-			})
-		}
-
-		By("Create Git repository")
-
-		repoCloneAddr := turtlesframework.GiteaCreateRepo(ctx, turtlesframework.GiteaCreateRepoInput{
-			ServerAddr: input.GitAddr,
-			RepoName:   repoName,
-			Username:   input.E2EConfig.GetVariable(e2e.GiteaUserNameVar),
-			Password:   input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar),
-		})
-		repoDir := turtlesframework.GitCloneRepo(ctx, turtlesframework.GitCloneRepoInput{
-			Address:  repoCloneAddr,
-			Username: input.E2EConfig.GetVariable(e2e.GiteaUserNameVar),
-			Password: input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar),
-		})
-
-		By("Create fleet repository structure")
-
-		clustersDir := filepath.Join(repoDir, "clusters")
-		os.MkdirAll(clustersDir, os.ModePerm)
-
-		additionalVars := map[string]string{
-			"CLUSTER_NAME":                input.ClusterName,
-			"WORKER_MACHINE_COUNT":        strconv.Itoa(workerMachineCount),
-			"CONTROL_PLANE_MACHINE_COUNT": strconv.Itoa(controlPlaneMachineCount),
-		}
-		for k, v := range input.AdditionalTemplateVariables {
-			additionalVars[k] = v
-		}
-
-		clusterPath := filepath.Join(clustersDir, fmt.Sprintf("%s.yaml", input.ClusterName))
-		Expect(turtlesframework.ApplyFromTemplate(ctx, turtlesframework.ApplyFromTemplateInput{
-			Getter:                        input.E2EConfig.GetVariable,
-			Template:                      input.ClusterTemplate,
-			OutputFilePath:                clusterPath,
-			AddtionalEnvironmentVariables: additionalVars,
-		})).To(Succeed())
-
-		fleetPath := filepath.Join(clustersDir, "fleet.yaml")
-		turtlesframework.FleetCreateFleetFile(ctx, turtlesframework.FleetCreateFleetFileInput{
-			Namespace: namespace.Name,
-			FilePath:  fleetPath,
-		})
-
-		By("Committing changes to fleet repo and pushing")
-
-		turtlesframework.GitCommitAndPush(ctx, turtlesframework.GitCommitAndPushInput{
-			CloneLocation: repoDir,
-			Username:      input.E2EConfig.GetVariable(e2e.GiteaUserNameVar),
-			Password:      input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar),
-			CommitMessage: "ci: add clusters bundle",
-		})
-
-		By("Applying GitRepo")
-
-		turtlesframework.FleetCreateGitRepo(ctx, turtlesframework.FleetCreateGitRepoInput{
-			Name:             repoName,
-			Namespace:        turtlesframework.FleetLocalNamespace,
-			Branch:           turtlesframework.DefaultBranchName,
-			Repo:             repoCloneAddr,
-			FleetGeneration:  1,
-			Paths:            []string{"clusters"},
-			ClientSecretName: input.GitAuthSecretName,
-			ClusterProxy:     input.BootstrapClusterProxy,
-		})
-
-		By("Waiting for the CAPI cluster to appear")
-		cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace.Name,
-			Name:      input.ClusterName,
-		}}
-		Eventually(
-			komega.Get(cluster),
-			input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).
-			Should(Succeed(), "Failed to apply CAPI cluster definition to cluster via Fleet")
-
-		By("Waiting for cluster control plane to be Ready")
-		Eventually(komega.Object(cluster), capiClusterCreateWait...).Should(HaveField("Status.ControlPlaneReady", BeTrue()))
-
-		By("Waiting for the CAPI cluster to be connectable")
-		Eventually(func() error {
-			remoteClient := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name).GetClient()
-			namespaces := &corev1.NamespaceList{}
-
-			return remoteClient.List(ctx, namespaces)
-		}, capiClusterCreateWait...).Should(Succeed(), "Failed to connect to workload cluster using CAPI kubeconfig")
-
-		By("Storing the original CAPI cluster kubeconfig")
-		turtlesframework.RancherGetOriginalKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
-			Getter:          input.BootstrapClusterProxy.GetClient(),
-			SecretName:      fmt.Sprintf("%s-kubeconfig", cluster.Name),
-			Namespace:       cluster.Namespace,
-			WriteToTempFile: true,
-		}, originalKubeconfig)
-
-		By("Running checks on Rancher cluster")
-		ValidateRancherCluster(ctx, &ValidateRancherClusterInput{
-			Namespace:                          namespace,
-			CAPICluster:                        cluster,
-			RancherKubeconfig:                  rancherKubeconfig,
-			RancherCluster:                     rancherCluster,
-			MigrateClusterUsingGitOpsSpecInput: &input,
-		})
-
-		testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
-			HelmBinaryPath:        input.HelmBinaryPath,
-			ChartsPath:            input.ChartPath,
-			BootstrapClusterProxy: input.BootstrapClusterProxy,
-			WaitInterval:          input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers"),
-		})
-
-		upgradeInput := testenv.UpgradeRancherTurtlesInput{
-			BootstrapClusterProxy:        input.BootstrapClusterProxy,
-			HelmBinaryPath:               input.HelmBinaryPath,
-			Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-			Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-			Tag:                          "v0.0.1",
-			WaitDeploymentsReadyInterval: input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers"),
-			AdditionalValues:             map[string]string{},
-		}
-
-		// NOTE: this was the default previously in the chart locally and ok as
-		// we where loading the image into kind manually.
-		upgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+		validateRancherCluster()
 
+		By("Running downgrade on provisioningv1 cluster later")
 		upgradeInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false"
-		upgradeInput.AdditionalValues["rancherTurtles.features.managementv3-cluster-migration.enabled"] = "true"
-
-		By("Running downgrade to provisioningv1 cluster")
+		upgradeInput.SkipCleanup = false
 		testenv.UpgradeRancherTurtles(ctx, upgradeInput)
 
-		By("Waiting for the Rancher cluster record to be removed")
+		By("Waiting for the new Rancher cluster record to be removed")
 		Eventually(komega.Get(rancherCluster), deleteClusterWait...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be unimported (deleted)")
 
 		By("CAPI cluster should NOT have the 'imported' annotation")
 		Consistently(func() bool {
-			Eventually(komega.Get(cluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
-			annotations := cluster.GetAnnotations()
+			Eventually(komega.Get(capiCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+			annotations := capiCluster.GetAnnotations()
 
 			_, found := annotations["imported"]
 			return !found
 		}, 5*time.Second).Should(BeTrue(), "'imported' annotation is NOT expected on CAPI cluster")
 
-		By("Rancher should be available using new cluster import")
-		ValidateLegacyRancherCluster(ctx, &ValidateLegacyRancherClusterInput{
-			Namespace:                          namespace,
-			CAPICluster:                        cluster,
-			RancherKubeconfig:                  rancherKubeconfig,
-			RancherLegacyCluster:               rancherLegacyCluster,
-			MigrateClusterUsingGitOpsSpecInput: &input,
-		})
+		By("Rancher should be available using old cluster import")
+		validateLegacyRancherCluster()
 	})
 
 	AfterEach(func() {
@@ -667,6 +420,16 @@ func DowngradeToV1UsingGitOpsSpec(ctx context.Context, inputGetter func() Migrat
 			fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err)
 		}
 
-		e2e.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, capiCluster, input.E2EConfig.GetIntervals, skipCapiClusterCleanup)
+		By("Deleting GitRepo from Rancher")
+		turtlesframework.FleetDeleteGitRepo(ctx, turtlesframework.FleetDeleteGitRepoInput{
+			Name:         repoName,
+			Namespace:    turtlesframework.FleetLocalNamespace,
+			ClusterProxy: input.BootstrapClusterProxy,
+		})
+
+		By("Waiting for the rancher cluster record to be removed")
+		Eventually(komega.Get(rancherCluster), deleteClusterWait...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted")
+
+		e2e.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, capiCluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 	})
 }
diff --git a/test/e2e/suites/migrate-gitops-v1-v3/suite_test.go b/test/e2e/suites/migrate-gitops-v1-v3/suite_test.go
deleted file mode 100644
index 719bcb62..00000000
--- a/test/e2e/suites/migrate-gitops-v1-v3/suite_test.go
+++ /dev/null
@@ -1,248 +0,0 @@
-//go:build e2e
-// +build e2e
-
-/*
-Copyright © 2023 - 2024 SUSE LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package migrate_gitops_v1_v3
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"testing"
-
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	"k8s.io/klog/v2"
-	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
-	ctrl "sigs.k8s.io/controller-runtime"
-
-	"github.com/rancher/turtles/test/e2e"
-	turtlesframework "github.com/rancher/turtles/test/framework"
-	"github.com/rancher/turtles/test/testenv"
-)
-
-// Test suite flags.
-var (
-	flagVals *e2e.FlagValues
-)
-
-// Test suite global vars.
-var (
-	// e2eConfig to be used for this test, read from configPath.
-	e2eConfig *clusterctl.E2EConfig
-
-	// clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository
-	// with the providers specified in the configPath.
-	clusterctlConfigPath string
-
-	// hostName is the host name for the Rancher Manager server.
-	hostName string
-
-	ctx = context.Background()
-
-	setupClusterResult *testenv.SetupTestClusterResult
-	giteaResult        *testenv.DeployGiteaResult
-)
-
-func init() {
-	flagVals = &e2e.FlagValues{}
-	e2e.InitFlags(flagVals)
-}
-
-func TestE2E(t *testing.T) {
-	RegisterFailHandler(Fail)
-
-	ctrl.SetLogger(klog.Background())
-
-	RunSpecs(t, "rancher-turtles-e2e-migrate-gitops-v1-v3")
-}
-
-var _ = BeforeSuite(func() {
-	Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
-	Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder)
-	Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.")
-	Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.")
-
-	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
-	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
-
-	dockerUsername := ""
-	dockerPassword := ""
-	if flagVals.UseEKS {
-		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
-		dockerUsername = os.Getenv("GITHUB_USERNAME")
-		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
-		dockerPassword = os.Getenv("GITHUB_TOKEN")
-		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
-	}
-
-	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
-	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
-
-	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
-
-	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
-	})
-
-	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
-	}
-
-	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
-		BootstrapClusterProxy:    setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:           flagVals.HelmBinaryPath,
-		HelmExtraValuesPath:      filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:             flagVals.IsolatedMode,
-		UseEKS:                   flagVals.UseEKS,
-		NginxIngress:             e2e.NginxIngress,
-		NginxIngressNamespace:    e2e.NginxIngressNamespace,
-		IngressWaitInterval:      e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
-		NgrokApiKey:              e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
-		NgrokAuthToken:           e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
-		NgrokPath:                e2eConfig.GetVariable(e2e.NgrokPathVar),
-		NgrokRepoName:            e2eConfig.GetVariable(e2e.NgrokRepoNameVar),
-		NgrokRepoURL:             e2eConfig.GetVariable(e2e.NgrokUrlVar),
-		DefaultIngressClassPatch: e2e.IngressClassPatch,
-	})
-
-	rancherInput := testenv.DeployRancherInput{
-		BootstrapClusterProxy:  setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:         flagVals.HelmBinaryPath,
-		HelmExtraValuesPath:    filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"),
-		InstallCertManager:     true,
-		CertManagerChartPath:   e2eConfig.GetVariable(e2e.CertManagerPathVar),
-		CertManagerUrl:         e2eConfig.GetVariable(e2e.CertManagerUrlVar),
-		CertManagerRepoName:    e2eConfig.GetVariable(e2e.CertManagerRepoNameVar),
-		RancherChartRepoName:   e2eConfig.GetVariable(e2e.RancherRepoNameVar),
-		RancherChartURL:        e2eConfig.GetVariable(e2e.RancherUrlVar),
-		RancherChartPath:       e2eConfig.GetVariable(e2e.RancherPathVar),
-		RancherVersion:         e2eConfig.GetVariable(e2e.RancherVersionVar),
-		RancherHost:            hostName,
-		RancherNamespace:       e2e.RancherNamespace,
-		RancherPassword:        e2eConfig.GetVariable(e2e.RancherPasswordVar),
-		RancherPatches:         [][]byte{e2e.RancherSettingPatch},
-		RancherWaitInterval:    e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
-		ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-		Variables:              e2eConfig.Variables,
-	}
-	if !flagVals.IsolatedMode {
-		// i.e. we are using ngrok locally
-		rancherInput.RancherIngressConfig = e2e.IngressConfig
-		rancherInput.RancherServicePatch = e2e.RancherServicePatch
-	}
-	testenv.DeployRancher(ctx, rancherInput)
-
-	rtInput := testenv.DeployRancherTurtlesInput{
-		BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:               flagVals.HelmBinaryPath,
-		ChartPath:                    "https://rancher.github.io/turtles",
-		CAPIProvidersYAML:            e2e.CapiProviders,
-		Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-		Version:                      "v0.6.0",
-		WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-		AdditionalValues:             map[string]string{},
-	}
-	testenv.DeployRancherTurtles(ctx, rtInput)
-
-	testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
-		HelmBinaryPath:        flagVals.HelmBinaryPath,
-		ChartsPath:            flagVals.ChartPath,
-		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		WaitInterval:          e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-	})
-
-	upgradeInput := testenv.UpgradeRancherTurtlesInput{
-		BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:               flagVals.HelmBinaryPath,
-		Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-		Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-		Tag:                          "v0.0.1",
-		WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-		AdditionalValues:             rtInput.AdditionalValues,
-	}
-
-	// NOTE: this was the default previously in the chart locally and ok as
-	// we where loading the image into kind manually.
-	rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
-	// disable the default management.cattle.io/v3 controller
-	rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false"
-
-	testenv.UpgradeRancherTurtles(ctx, upgradeInput)
-
-	giteaValues := map[string]string{
-		"gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar),
-		"gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
-		"service.http.type":    "NodePort",
-	}
-
-	giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{
-		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:        flagVals.HelmBinaryPath,
-		ChartRepoName:         e2eConfig.GetVariable(e2e.GiteaRepoNameVar),
-		ChartRepoURL:          e2eConfig.GetVariable(e2e.GiteaRepoURLVar),
-		ChartName:             e2eConfig.GetVariable(e2e.GiteaChartNameVar),
-		ChartVersion:          e2eConfig.GetVariable(e2e.GiteaChartVersionVar),
-		ValuesFilePath:        "../../data/gitea/values.yaml",
-		Values:                giteaValues,
-		RolloutWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"),
-		ServiceWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"),
-		AuthSecretName:        e2e.AuthSecretName,
-		Username:              e2eConfig.GetVariable(e2e.GiteaUserNameVar),
-		Password:              e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
-	})
-})
-
-var _ = AfterSuite(func() {
-	testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{
-		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:        flagVals.HelmBinaryPath,
-		DeleteWaitInterval:    e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"),
-	})
-
-	testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{
-		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath:        flagVals.HelmBinaryPath,
-		Namespace:             turtlesframework.DefaultRancherTurtlesNamespace,
-		DeleteWaitInterval:    e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"),
-	})
-
-	testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{
-		SetupTestClusterResult: *setupClusterResult,
-		SkipCleanup:            flagVals.SkipCleanup,
-		ArtifactFolder:         flagVals.ArtifactFolder,
-	})
-})
-
-func shortTestOnly() bool {
-	return GinkgoLabelFilter() == e2e.ShortTestLabel
-}
-
-func localTestOnly() bool {
-	return GinkgoLabelFilter() == e2e.LocalTestLabel
-}
diff --git a/test/e2e/suites/migrate-gitops-v3-v1/migrate_gitops_mgmtv3_ provv1_test.go b/test/e2e/suites/migrate-gitops-v3-v1/migrate_gitops_mgmtv3_ provv1_test.go
deleted file mode 100644
index f2c3e5b8..00000000
--- a/test/e2e/suites/migrate-gitops-v3-v1/migrate_gitops_mgmtv3_ provv1_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-//go:build e2e
-// +build e2e
-
-/*
-Copyright © 2023 - 2024 SUSE LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package migrate_gitops_v3_v1
-
-import (
-	_ "embed"
-
-	. "github.com/onsi/ginkgo/v2"
-	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
-
-	"k8s.io/utils/ptr"
-
-	"github.com/rancher/turtles/test/e2e"
-	"github.com/rancher/turtles/test/e2e/specs"
-)
-
-var _ = Describe("[Docker] [Kubeadm] - [provisioning.cattle.io/v1] Downgrade v3 to provisioning v1 cluster functionality should work", Label(e2e.ShortTestLabel), func() {
-	BeforeEach(func() {
-		komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
-		komega.SetContext(ctx)
-	})
-
-	specs.DowngradeToV1UsingGitOpsSpec(ctx, func() specs.MigrateClusterUsingGitOpsSpecInput {
-		return specs.MigrateClusterUsingGitOpsSpecInput{
-			HelmBinaryPath:                 flagVals.HelmBinaryPath,
-			ChartPath:                      flagVals.ChartPath,
-			E2EConfig:                      e2eConfig,
-			BootstrapClusterProxy:          setupClusterResult.BootstrapClusterProxy,
-			ClusterctlConfigPath:           flagVals.ConfigPath,
-			ClusterctlBinaryPath:           flagVals.ClusterctlBinaryPath,
-			ArtifactFolder:                 flagVals.ArtifactFolder,
-			ClusterTemplate:                e2e.CAPIDockerKubeadm,
-			ClusterName:                    "clusterv3-migrated-v1",
-			ControlPlaneMachineCount:       ptr.To(1),
-			WorkerMachineCount:             ptr.To(1),
-			GitAddr:                        giteaResult.GitAddress,
-			GitAuthSecretName:              e2e.AuthSecretName,
-			SkipCleanup:                    false,
-			SkipDeletionTest:               false,
-			LabelNamespace:                 true,
-			TestClusterReimport:            true,
-			RancherServerURL:               hostName,
-			CAPIClusterCreateWaitName:      "wait-rancher",
-			DeleteClusterWaitName:          "wait-controllers",
-			CapiClusterOwnerLabel:          e2e.CapiClusterOwnerLabel,
-			CapiClusterOwnerNamespaceLabel: e2e.CapiClusterOwnerNamespaceLabel,
-			OwnedLabelName:                 e2e.OwnedLabelName,
-		}
-	})
-})
diff --git a/test/e2e/suites/migrate-gitops-v1-v3/migrate_gitops_provv1_mgmtv3_test.go b/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go
similarity index 90%
rename from test/e2e/suites/migrate-gitops-v1-v3/migrate_gitops_provv1_mgmtv3_test.go
rename to test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go
index 01a04186..43a8ca51 100644
--- a/test/e2e/suites/migrate-gitops-v1-v3/migrate_gitops_provv1_mgmtv3_test.go
+++ b/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go
@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package migrate_gitops_v1_v3
+package migrate_gitops
 
 import (
 	_ "embed"
@@ -37,8 +37,8 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Migrate v1 to m
 		komega.SetContext(ctx)
 	})
 
-	specs.MigrateToV3UsingGitOpsSpec(ctx, func() specs.MigrateClusterUsingGitOpsSpecInput {
-		return specs.MigrateClusterUsingGitOpsSpecInput{
+	specs.MigrateToV3UsingGitOpsSpec(ctx, func() specs.MigrateToV3UsingGitOpsSpecInput {
+		return specs.MigrateToV3UsingGitOpsSpecInput{
 			HelmBinaryPath:        flagVals.HelmBinaryPath,
 			ChartPath:             flagVals.ChartPath,
 			E2EConfig:             e2eConfig,
@@ -47,7 +47,7 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Migrate v1 to m
 			ClusterctlBinaryPath:  flagVals.ClusterctlBinaryPath,
 			ArtifactFolder:        flagVals.ArtifactFolder,
 			ClusterTemplate:       e2e.CAPIDockerKubeadm,
-			ClusterName:           "clusterv1-migrated-v3",
+			ClusterName:           "clusterv3-migrated",
 			ControlPlaneMachineCount: ptr.To(1),
 			WorkerMachineCount:       ptr.To(1),
 			GitAddr:                  giteaResult.GitAddress,
diff --git a/test/e2e/suites/migrate-gitops-v3-v1/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go
similarity index 95%
rename from test/e2e/suites/migrate-gitops-v3-v1/suite_test.go
rename to test/e2e/suites/migrate-gitops/suite_test.go
index 76b8b5a7..36f0f891 100644
--- a/test/e2e/suites/migrate-gitops-v3-v1/suite_test.go
+++ b/test/e2e/suites/migrate-gitops/suite_test.go
@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package migrate_gitops_v3_v1
+package migrate_gitops
 
 import (
 	"context"
@@ -71,7 +71,7 @@ func TestE2E(t *testing.T) {
 
 	ctrl.SetLogger(klog.Background())
 
-	RunSpecs(t, "rancher-turtles-e2e-migrate-gitops-v3-v1")
+	RunSpecs(t, "rancher-turtles-e2e-migrate-gitops")
 }
 
 var _ = BeforeSuite(func() {
@@ -83,16 +83,6 @@ var _ = BeforeSuite(func() {
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
 
-	dockerUsername := ""
-	dockerPassword := ""
-	if flagVals.UseEKS {
-		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
-		dockerUsername = os.Getenv("GITHUB_USERNAME")
-		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
-		dockerPassword = os.Getenv("GITHUB_TOKEN")
-		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
-	}
-
 	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
@@ -151,7 +141,7 @@ var _ = BeforeSuite(func() {
 		ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
 		Variables:              e2eConfig.Variables,
 	}
-	if !flagVals.IsolatedMode {
+	if !flagVals.IsolatedMode && !flagVals.UseEKS {
 		// i.e. we are using ngrok locally
 		rancherInput.RancherIngressConfig = e2e.IngressConfig
 		rancherInput.RancherServicePatch = e2e.RancherServicePatch
@@ -190,6 +180,8 @@ var _ = BeforeSuite(func() {
 	// NOTE: this was the default previously in the chart locally and ok as
 	// we where loading the image into kind manually.
 	rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+	rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true"
+	rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false"
 	// disable the default management.cattle.io/v3 controller
 
 	testenv.UpgradeRancherTurtles(ctx, upgradeInput)