From 75d682ea81ce012ba8c00e2512c847d741ea65eb Mon Sep 17 00:00:00 2001
From: Danil-Grigorev
Date: Tue, 25 Jun 2024 14:58:08 +0200
Subject: [PATCH 1/3] Opt-in for cluster downgrade scenario with
 managementv3-cluster-migration

Signed-off-by: Danil-Grigorev
---
 .../rancher-turtles/templates/post-upgrade-job.yaml | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/charts/rancher-turtles/templates/post-upgrade-job.yaml b/charts/rancher-turtles/templates/post-upgrade-job.yaml
index 75215371..b0915bcd 100644
--- a/charts/rancher-turtles/templates/post-upgrade-job.yaml
+++ b/charts/rancher-turtles/templates/post-upgrade-job.yaml
-{{- if and (eq (index .Values "rancherTurtles" "features" "managementv3-cluster" "enabled") true) (eq (index .Values "rancherTurtles" "features" "managementv3-cluster-migration" "enabled") true) }}
+{{- if eq (index .Values "rancherTurtles" "features" "managementv3-cluster-migration" "enabled") true }}
 ---
 apiVersion: v1
 kind: ServiceAccount
rules:
   verbs:
   - list
   - delete
+- apiGroups:
+  - management.cattle.io
+  resources:
+  - clusters
+  verbs:
+  - list
+  - delete
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
spec:
         image: {{ index .Values "rancherTurtles" "features" "rancher-webhook" "kubectlImage" }}
         args:
         - delete
+        {{- if eq (index .Values "rancherTurtles" "features" "managementv3-cluster" "enabled") true }}
         - clusters.provisioning.cattle.io
+        {{- else }}
+        - clusters.management.cattle.io
+        {{- end }}
         - --selector=cluster-api.cattle.io/owned
         - -A
         - --ignore-not-found=true
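Since Helm charts are Go templates, the toggle this patch introduces can be sanity-checked outside the chart. Below is a minimal, self-contained sketch, not part of the chart itself: ManagementV3Enabled stands in for the rancherTurtles.features.managementv3-cluster.enabled value, and the args list is reduced to the lines the patch touches.

package main

import (
    "os"
    "text/template"
)

// jobArgs mirrors the {{- if }} / {{- else }} branch added to
// post-upgrade-job.yaml: with the feature enabled the post-upgrade job
// deletes the provisioning.cattle.io clusters, otherwise (the downgrade
// case) it deletes the management.cattle.io clusters.
const jobArgs = `args:
- delete
{{- if .ManagementV3Enabled }}
- clusters.provisioning.cattle.io
{{- else }}
- clusters.management.cattle.io
{{- end }}
- --selector=cluster-api.cattle.io/owned
`

func main() {
    tmpl := template.Must(template.New("args").Parse(jobArgs))
    for _, enabled := range []bool{true, false} {
        // Render both branches of the conditional.
        if err := tmpl.Execute(os.Stdout, struct{ ManagementV3Enabled bool }{enabled}); err != nil {
            panic(err)
        }
    }
}

Either way, the job itself only renders when managementv3-cluster-migration is enabled, which is the opt-in the subject line refers to.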
From 47dbec61b5f849163cbaca6389b040e9e4c62e2c Mon Sep 17 00:00:00 2001
From: Danil-Grigorev
Date: Wed, 26 Jun 2024 15:15:51 +0200
Subject: [PATCH 2/3] Perform e2e test for downgrade to provv1 with automigrate
 enabled

- Alternative: caa656f

Signed-off-by: Danil-Grigorev
---
 .../e2e/specs/migrate_gitops_provv1_mgmtv3.go |  20 ++
 test/e2e/suites/migrate-gitops/suite_test.go  | 212 +++---------
 test/testenv/turtles.go                       |  27 ++-
 3 files changed, 68 insertions(+), 191 deletions(-)

diff --git a/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go b/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go
index c9e76e82..069ccb22 100644
--- a/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go
+++ b/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go
func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateT
         Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
         Tag:                          "v0.0.1",
         WaitDeploymentsReadyInterval: input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers"),
+        SkipCleanup:                  true,
         AdditionalValues:             map[string]string{},
     }

         By("Rancher should be available using new cluster import")
         validateRancherCluster()
+
+        By("Running downgrade on provisioningv1 cluster later")
+        upgradeInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false"
+        upgradeInput.SkipCleanup = false
+        testenv.UpgradeRancherTurtles(ctx, upgradeInput)
+
+        By("Waiting for the new Rancher cluster record to be removed")
+        Eventually(komega.Get(rancherCluster), deleteClusterWait...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be unimported (deleted)")
+
+        By("CAPI cluster should NOT have the 'imported' annotation")
+        Consistently(func() bool {
+            Eventually(komega.Get(capiCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
+            annotations := capiCluster.GetAnnotations()
+            _, found := annotations["imported"]
+            return !found
+        }, 5*time.Second).Should(BeTrue(), "'imported' annotation is NOT expected on CAPI cluster")
+
+        By("Rancher should be available using old cluster import")
+        validateLegacyRancherCluster()
     })

     AfterEach(func() {
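The new downgrade steps rely on a nested polling pattern: Consistently holds the assertion over a whole time window, while the inner Eventually re-fetches the object before every probe. A stripped-down, runnable sketch of the same shape follows; the getAnnotations stub and the intervals are illustrative stand-ins for the komega.Get refresh in the spec.

package main

import (
    "fmt"
    "time"

    . "github.com/onsi/gomega"
)

func main() {
    g := NewGomega(func(message string, _ ...int) { panic(message) })

    // Stand-in for komega.Get(capiCluster) followed by GetAnnotations():
    // the real spec refreshes the object from the API server on every probe.
    getAnnotations := func() map[string]string {
        return map[string]string{"owned": "true"} // no "imported" key present
    }

    // The assertion must hold for the entire window, not just once.
    g.Consistently(func() bool {
        annotations := getAnnotations()
        _, found := annotations["imported"]
        return !found
    }, 5*time.Second, 500*time.Millisecond).Should(BeTrue(),
        "'imported' annotation is not expected on the CAPI cluster")

    fmt.Println("annotation stayed absent for the full window")
}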
diff --git a/test/e2e/suites/migrate-gitops/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go
index b9738e3e..36f0f891 100644
--- a/test/e2e/suites/migrate-gitops/suite_test.go
+++ b/test/e2e/suites/migrate-gitops/suite_test.go
import (
     ctrl "sigs.k8s.io/controller-runtime"

     "github.com/rancher/turtles/test/e2e"
-    "github.com/rancher/turtles/test/framework"
     turtlesframework "github.com/rancher/turtles/test/framework"
     "github.com/rancher/turtles/test/testenv"
 )

var _ = BeforeSuite(func() {
     By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
     e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)

-    dockerUsername := ""
-    dockerPassword := ""
-    if flagVals.UseEKS {
-        Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
-        dockerUsername = os.Getenv("GITHUB_USERNAME")
-        Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
-        dockerPassword = os.Getenv("GITHUB_TOKEN")
-        Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
-    }
-
     By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
     clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))

         DefaultIngressClassPatch: e2e.IngressClassPatch,
     })

-    if flagVals.UseEKS {
-        By("Getting ingress hostname")
-        svcRes := &testenv.WaitForServiceIngressHostnameResult{}
-        testenv.WaitForServiceIngressHostname(ctx, testenv.WaitForServiceIngressHostnameInput{
-            BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-            ServiceName:           "ingress-nginx-controller",
-            ServiceNamespace:      "ingress-nginx",
-            IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
-        }, svcRes)
-        hostName = svcRes.Hostname
-
-        By("Deploying ghcr details")
-        framework.CreateDockerRegistrySecret(ctx, framework.CreateDockerRegistrySecretInput{
-            Name:                  "regcred",
-            BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-            Namespace:             "rancher-turtles-system",
-            DockerServer:          "https://ghcr.io",
-            DockerUsername:        dockerUsername,
-            DockerPassword:        dockerPassword,
-        })
-    }
-
     rancherInput := testenv.DeployRancherInput{
         BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
         HelmBinaryPath:        flagVals.HelmBinaryPath,

         rancherInput.RancherIngressConfig = e2e.IngressConfig
         rancherInput.RancherServicePatch = e2e.RancherServicePatch
     }
-    if flagVals.UseEKS {
-        rancherInput.RancherIngressClassName = "nginx"
-    }

     testenv.DeployRancher(ctx, rancherInput)

-    if shortTestOnly() {
-        rtInput := testenv.DeployRancherTurtlesInput{
-            BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
-            HelmBinaryPath:               flagVals.HelmBinaryPath,
-            ChartPath:                    "https://rancher.github.io/turtles",
-            CAPIProvidersYAML:            e2e.CapiProviders,
-            Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-            Version:                      "v0.6.0",
-            WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-            AdditionalValues:             map[string]string{},
-        }
-        testenv.DeployRancherTurtles(ctx, rtInput)
-
-        testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
-            HelmBinaryPath:        flagVals.HelmBinaryPath,
-            ChartsPath:            flagVals.ChartPath,
-            BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-            WaitInterval:          e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-        })
-
-        upgradeInput := testenv.UpgradeRancherTurtlesInput{
-            BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
-            HelmBinaryPath:               flagVals.HelmBinaryPath,
-            Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-            Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-            Tag:                          "v0.0.1",
-            WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-            AdditionalValues:             rtInput.AdditionalValues,
-        }
-
-        if flagVals.UseEKS {
-            rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
-            rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
-        } else {
-            // NOTE: this was the default previously in the chart locally and ok as
-            // we where loading the image into kind manually.
-            rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
-        }
-
-        rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true"
-        rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller
-
-        testenv.UpgradeRancherTurtles(ctx, upgradeInput)
-    } else {
-        rtInput := testenv.DeployRancherTurtlesInput{
-            BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
-            HelmBinaryPath:               flagVals.HelmBinaryPath,
-            ChartPath:                    flagVals.ChartPath,
-            CAPIProvidersYAML:            e2e.CapiProviders,
-            Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-            Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-            Tag:                          "v0.0.1",
-            WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-            AdditionalValues:             map[string]string{},
-        }
-        if flagVals.UseEKS {
-            rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
-            rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
-        } else {
-            // NOTE: this was the default previously in the chart locally and ok as
-            // we where loading the image into kind manually.
-            rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
-        }
-
-        rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller
-        testenv.DeployRancherTurtles(ctx, rtInput)
+    rtInput := testenv.DeployRancherTurtlesInput{
+        BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
+        HelmBinaryPath:               flagVals.HelmBinaryPath,
+        ChartPath:                    "https://rancher.github.io/turtles",
+        CAPIProvidersYAML:            e2e.CapiProviders,
+        Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
+        Version:                      "v0.6.0",
+        WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
+        AdditionalValues:             map[string]string{},
     }
+    testenv.DeployRancherTurtles(ctx, rtInput)

-    if !shortTestOnly() && !localTestOnly() {
-        By("Running full tests, deploying additional infrastructure providers")
-        awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar)
-        Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test")
-
-        testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{
-            BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-            CAPIProvidersSecretsYAML: [][]byte{
-                e2e.AWSProviderSecret,
-                e2e.AzureIdentitySecret,
-            },
-            CAPIProvidersYAML: e2e.FullProviders,
-            TemplateData: map[string]string{
-                "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar),
-            },
-            WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-            WaitForDeployments: []testenv.NamespaceName{
-                {
-                    Name:      "capa-controller-manager",
-                    Namespace: "capa-system",
-                },
-                {
-                    Name:      "capz-controller-manager",
-                    Namespace: "capz-system",
-                },
-            },
-        })
-    } else if Label(e2e.LocalTestLabel).MatchesLabelFilter(GinkgoLabelFilter()) {
-        By("Running local vSphere tests, deploying vSphere infrastructure provider")
-
-        testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{
-            BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-            CAPIProvidersSecretsYAML: [][]byte{
-                e2e.VSphereProviderSecret,
-            },
-            CAPIProvidersYAML:            e2e.CapvProvider,
-            WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-            WaitForDeployments: []testenv.NamespaceName{
-                {
-                    Name:      "capv-controller-manager",
-                    Namespace: "capv-system",
-                },
-            },
-        })
     }

+    testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
+        HelmBinaryPath:        flagVals.HelmBinaryPath,
+        ChartsPath:            flagVals.ChartPath,
+        BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
+        WaitInterval:          e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
+    })
+
+    upgradeInput := testenv.UpgradeRancherTurtlesInput{
+        BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
+        HelmBinaryPath:               flagVals.HelmBinaryPath,
+        Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
+        Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
+        Tag:                          "v0.0.1",
+        WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
+        AdditionalValues:             rtInput.AdditionalValues,
     }

+    // NOTE: this was the default previously in the chart locally and ok as
+    // we were loading the image into kind manually.
+    rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
+    rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true"
+    rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller
+
+    testenv.UpgradeRancherTurtles(ctx, upgradeInput)
+
     giteaValues := map[string]string{
         "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar),
         "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar),
         "service.http.type":    "NodePort",
     }
-    if flagVals.UseEKS {
-        giteaValues["service.http.type"] = "LoadBalancer"
-    }

     giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{
         BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,

     })
 })

var _ = AfterSuite(func() {
     })
 })

-var _ = AfterEach(func() {
-    testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
-        HelmBinaryPath:        flagVals.HelmBinaryPath,
-        ChartsPath:            flagVals.ChartPath,
-        BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-        WaitInterval:          e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-    })
-
-    upgradeInput := testenv.UpgradeRancherTurtlesInput{
-        BootstrapClusterProxy:        setupClusterResult.BootstrapClusterProxy,
-        HelmBinaryPath:               flagVals.HelmBinaryPath,
-        Namespace:                    turtlesframework.DefaultRancherTurtlesNamespace,
-        Image:                        fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-        Tag:                          "v0.0.1",
-        WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
-        AdditionalValues:             map[string]string{},
-    }
-
-    if flagVals.UseEKS {
-        upgradeInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
-        upgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
-    } else {
-        // NOTE: this was the default previously in the chart locally and ok as
-        // we where loading the image into kind manually.
-        upgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
-    }
-
-    testenv.UpgradeRancherTurtles(ctx, upgradeInput)
-})

 func shortTestOnly() bool {
     return GinkgoLabelFilter() == e2e.ShortTestLabel
 }
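The AdditionalValues maps that the suite builds up end up as repeated --set flags on the helm command line; the loop doing this is visible in the turtles.go context below. A small sketch of that conversion, where the setFlags name and the sorted iteration are illustrative (the real loop ranges over the map directly):

package main

import (
    "fmt"
    "sort"
)

// setFlags turns a values map into helm --set arguments. Keys are sorted
// here only to make the generated command line deterministic.
func setFlags(values map[string]string) []string {
    keys := make([]string, 0, len(values))
    for key := range values {
        keys = append(keys, key)
    }
    sort.Strings(keys)

    args := make([]string, 0, 2*len(values))
    for _, key := range keys {
        args = append(args, "--set", fmt.Sprintf("%s=%s", key, values[key]))
    }
    return args
}

func main() {
    fmt.Println(setFlags(map[string]string{
        "rancherTurtles.features.managementv3-cluster.enabled": "false",
        "rancherTurtles.imagePullPolicy":                       "Never",
    }))
}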
diff --git a/test/testenv/turtles.go b/test/testenv/turtles.go
index ab1e2197..4f2661e8 100644
--- a/test/testenv/turtles.go
+++ b/test/testenv/turtles.go
type UpgradeRancherTurtlesInput struct {
     AdditionalValues map[string]string
     Image            string
     Tag              string
+    SkipCleanup      bool
 }

func UpgradeRancherTurtles(ctx context.Context, input UpgradeRancherTurtlesInput) {
         additionalValues = append(additionalValues, "--set", fmt.Sprintf("%s=%s", name, val))
     }

-    defer func() {
-        values := []string{"repo", "remove", "rancher-turtles-local"}
-        cmd := exec.Command(
-            input.HelmBinaryPath,
-            values...,
-        )
-        cmd.WaitDelay = time.Minute
-        out, err := cmd.CombinedOutput()
-        if err != nil {
-            Expect(fmt.Errorf("Unable to perform chart removal: %w\nOutput: %s, Command: %s", err, out, strings.Join(append(values, additionalValues...), " "))).ToNot(HaveOccurred())
-        }
-    }()
+    if !input.SkipCleanup {
+        defer func() {
+            values := []string{"repo", "remove", "rancher-turtles-local"}
+            cmd := exec.Command(
+                input.HelmBinaryPath,
+                values...,
+            )
+            cmd.WaitDelay = time.Minute
+            out, err := cmd.CombinedOutput()
+            if err != nil {
+                Expect(fmt.Errorf("Unable to perform chart removal: %w\nOutput: %s, Command: %s", err, out, strings.Join(append(values, additionalValues...), " "))).ToNot(HaveOccurred())
+            }
+        }()
+    }

     values := []string{"repo", "update"}
     cmd := exec.Command(
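The SkipCleanup flag exists so the downgrade spec can call UpgradeRancherTurtles twice against the same locally served chart: the first call keeps the rancher-turtles-local helm repo registered, and only the second removes it. A minimal sketch of the gated-defer pattern under illustrative names:

package main

import "fmt"

// upgrade registers its cleanup only when the caller marks the run as final;
// a defer placed inside the if-block still executes at function return, so
// the cleanup ordering matches the ungated version.
func upgrade(step string, skipCleanup bool) {
    if !skipCleanup {
        defer fmt.Printf("cleanup: helm repo remove rancher-turtles-local (after %s)\n", step)
    }
    fmt.Println("helm upgrade:", step)
}

func main() {
    upgrade("enable managementv3-cluster", true)   // repo kept for the next call
    upgrade("downgrade to provisioning v1", false) // final call cleans up
}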
From cbe0e639140f41a320a0c4bc28a56486c78996dd Mon Sep 17 00:00:00 2001
From: Danil-Grigorev
Date: Thu, 11 Jul 2024 14:51:42 +0200
Subject: [PATCH 3/3] Update v1 controller to clean up finalizer on CAPI
 clusters

Signed-off-by: Danil-Grigorev
---
 internal/controllers/import_controller.go    | 55 ++++++++++++++++++-
 .../controllers/import_controller_test.go    | 19 +++++++
 internal/controllers/import_controller_v3.go | 12 ++--
 main.go                                      |  9 +++
 4 files changed, 86 insertions(+), 9 deletions(-)

diff --git a/internal/controllers/import_controller.go b/internal/controllers/import_controller.go
index ee909f7b..60da431a 100644
--- a/internal/controllers/import_controller.go
+++ b/internal/controllers/import_controller.go
import (
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/controller"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
     "sigs.k8s.io/controller-runtime/pkg/event"
     "sigs.k8s.io/controller-runtime/pkg/handler"
     "sigs.k8s.io/controller-runtime/pkg/log"
     "sigs.k8s.io/controller-runtime/pkg/predicate"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
     "sigs.k8s.io/controller-runtime/pkg/source"

     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
     "sigs.k8s.io/cluster-api/util/predicates"

     "github.com/rancher/turtles/feature"
+    managementv3 "github.com/rancher/turtles/internal/rancher/management/v3"
     provisioningv1 "github.com/rancher/turtles/internal/rancher/provisioning/v1"
     "github.com/rancher/turtles/util"
     turtlesannotations "github.com/rancher/turtles/util/annotations"

func (r *CAPIImportReconciler) Reconcile(ctx context.Context, req ctrl.Request)
     log = log.WithValues("cluster", capiCluster.Name)

+    // Collect errors as an aggregate to return together after all patches have been performed.
+    var errs []error
+
+    if !capiCluster.ObjectMeta.DeletionTimestamp.IsZero() && controllerutil.RemoveFinalizer(capiCluster, managementv3.CapiClusterFinalizer) {
+        if err := r.Client.Patch(ctx, capiCluster, patchBase); err != nil {
+            log.Error(err, "failed to remove CAPI cluster finalizer "+managementv3.CapiClusterFinalizer)
+            errs = append(errs, err)
+        }
+    }
+
     // Wait for controlplane to be ready. This should never be false as the predicates
     // do the filtering.
     if !capiCluster.Status.ControlPlaneReady && !conditions.IsTrue(capiCluster, clusterv1.ControlPlaneReadyCondition) {
         return ctrl.Result{RequeueAfter: defaultRequeueDuration}, nil
     }

-    // Collect errors as an aggregate to return together after all patches have been performed.
-    var errs []error
-
     result, err := r.reconcile(ctx, capiCluster)
     if err != nil {
         errs = append(errs, fmt.Errorf("error reconciling cluster: %w", err))
     }

func (r *CAPIImportReconciler) reconcileDelete(ctx context.Context, capiCluster

     return ctrl.Result{}, nil
 }
+
+// CAPIDowngradeReconciler is a reconciler for downgraded managementv3 clusters.
+type CAPIDowngradeReconciler struct {
+    RancherClient client.Client
+    Scheme        *runtime.Scheme
+}
+
+// SetupWithManager sets up the reconciler with the manager.
+func (r *CAPIDowngradeReconciler) SetupWithManager(_ context.Context, mgr ctrl.Manager, options controller.Options) error {
+    if err := ctrl.NewControllerManagedBy(mgr).
+        For(&managementv3.Cluster{}).
+        WithOptions(options).
+        WithEventFilter(predicate.NewPredicateFuncs(func(object client.Object) bool {
+            _, exist := object.GetLabels()[ownedLabelName]
+            return exist
+        })).
+        Complete(reconcile.AsReconciler(r.RancherClient, r)); err != nil {
+        return fmt.Errorf("creating new downgrade controller: %w", err)
+    }
+
+    return nil
+}
+
+// Reconcile performs a check for downgraded clusters and removes the finalizer on clusters still owned by the previous management v3 controller.
+func (r *CAPIDowngradeReconciler) Reconcile(ctx context.Context, cluster *managementv3.Cluster) (res ctrl.Result, err error) {
+    log := log.FromContext(ctx)
+
+    patchBase := client.MergeFromWithOptions(cluster.DeepCopy(), client.MergeFromWithOptimisticLock{})
+
+    if !controllerutil.RemoveFinalizer(cluster, managementv3.CapiClusterFinalizer) {
+        return
+    }
+
+    if err = r.RancherClient.Patch(ctx, cluster, patchBase); err != nil {
+        log.Error(err, "Unable to remove turtles finalizer from cluster "+cluster.Name)
+    }
+
+    return
+}
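Both reconcilers in this file now lean on controllerutil.RemoveFinalizer reporting whether it actually mutated the object, so a patch is only sent when there was something to remove. A self-contained sketch of that guard; the ConfigMap and the finalizer string are stand-ins for the cluster resources and managementv3.CapiClusterFinalizer:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const demoFinalizer = "example.turtles.cattle.io/capi-cluster" // illustrative name

func main() {
    obj := &corev1.ConfigMap{}
    obj.SetFinalizers([]string{demoFinalizer})

    // First pass: the finalizer is present, so the controller would issue
    // r.Client.Patch(ctx, obj, patchBase) at this point.
    if controllerutil.RemoveFinalizer(obj, demoFinalizer) {
        fmt.Println("finalizer removed, patch issued; remaining:", obj.GetFinalizers())
    }

    // Second pass: nothing left to remove, so no API call is made, which
    // keeps the reconcile path idempotent.
    if !controllerutil.RemoveFinalizer(obj, demoFinalizer) {
        fmt.Println("finalizer already gone, skipping patch")
    }
}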
diff --git a/internal/controllers/import_controller_test.go b/internal/controllers/import_controller_test.go
index 422f5124..2223639a 100644
--- a/internal/controllers/import_controller_test.go
+++ b/internal/controllers/import_controller_test.go
var _ = Describe("reconcile CAPI Cluster", func() {
         }).Should(Succeed())
     })

+    It("should remove a CAPI cluster with turtles finalizer", func() {
+        capiCluster.Finalizers = []string{managementv3.CapiClusterFinalizer}
+        Expect(cl.Create(ctx, capiCluster)).To(Succeed())
+        capiCluster.Status.ControlPlaneReady = true
+        Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed())
+        Expect(cl.Delete(ctx, capiCluster)).To(Succeed())
+
+        Eventually(func(g Gomega) {
+            _, err := r.Reconcile(ctx, reconcile.Request{
+                NamespacedName: types.NamespacedName{
+                    Namespace: capiCluster.Namespace,
+                    Name:      capiCluster.Name,
+                },
+            })
+            g.Expect(err).ToNot(HaveOccurred())
+            g.Expect(cl.Get(ctx, client.ObjectKeyFromObject(capiCluster), capiCluster)).To(HaveOccurred())
+        }).Should(Succeed())
+    })
+
     It("should reconcile a CAPI cluster when rancher cluster doesn't exist", func() {
         capiCluster.Labels = map[string]string{
             importLabelName: "true",
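The v3 controller change below is mostly about ordering: a client.MergeFrom-style patch is computed as a diff against the snapshot handed to MergeFromWithOptions, so the DeepCopy has to be taken before the object is mutated; the old code snapshotted the live object after copying it, yielding an empty diff. A runnable sketch with a ConfigMap standing in for the CAPI cluster:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
    cm := &corev1.ConfigMap{}
    cm.SetResourceVersion("42") // the optimistic lock needs a resource version
    cm.SetAnnotations(map[string]string{})

    // Snapshot first, while the object is still unmodified...
    patchBase := client.MergeFromWithOptions(cm.DeepCopy(), client.MergeFromWithOptimisticLock{})

    // ...then mutate, the way reconcileDelete adds the 'imported' annotation.
    cm.Annotations["imported"] = "true"

    // The patch body contains exactly the mutation plus the resourceVersion
    // precondition; snapshotting after the mutation would produce no diff.
    data, err := patchBase.Data(cm)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data))
}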
diff --git a/internal/controllers/import_controller_v3.go b/internal/controllers/import_controller_v3.go
index 38936675..dc9780c6 100644
--- a/internal/controllers/import_controller_v3.go
+++ b/internal/controllers/import_controller_v3.go
func (r *CAPIImportManagementV3Reconciler) Reconcile(ctx context.Context, req ct
     // Collect errors as an aggregate to return together after all patches have been performed.
     var errs []error

+    patchBase := client.MergeFromWithOptions(capiCluster.DeepCopy(), client.MergeFromWithOptimisticLock{})
+
     result, err := r.reconcile(ctx, capiCluster)
     if err != nil {
         errs = append(errs, fmt.Errorf("error reconciling cluster: %w", err))
     }

     if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
-        capiClusterCopy := capiCluster.DeepCopy()
-
-        patchBase := client.MergeFromWithOptions(capiCluster, client.MergeFromWithOptimisticLock{})
-
-        if err := r.Client.Patch(ctx, capiClusterCopy, patchBase); err != nil {
+        if err := r.Client.Patch(ctx, capiCluster, patchBase); err != nil {
             errs = append(errs, fmt.Errorf("failed to patch cluster: %w", err))
         }
         return nil

func (r *CAPIImportManagementV3Reconciler) reconcileDelete(ctx context.Context,
         capiCluster.Name,
         turtlesannotations.ClusterImportedAnnotation))

+    patchBase := client.MergeFromWithOptions(capiCluster.DeepCopy(), client.MergeFromWithOptimisticLock{})
+
     annotations := capiCluster.GetAnnotations()
     if annotations == nil {
         annotations = map[string]string{}
     }

     capiCluster.SetAnnotations(annotations)
     controllerutil.RemoveFinalizer(capiCluster, managementv3.CapiClusterFinalizer)

-    if err := r.Client.Update(ctx, capiCluster); err != nil {
+    if err := r.Client.Patch(ctx, capiCluster, patchBase); err != nil {
         return fmt.Errorf("error removing finalizer: %w", err)
     }

diff --git a/main.go b/main.go
index a1256a5e..7b0df8dd 100644
--- a/main.go
+++ b/main.go
func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
         setupLog.Error(err, "unable to create capi controller")
         os.Exit(1)
     }
+
+    if err := (&controllers.CAPIDowngradeReconciler{
+        RancherClient: rancherClient,
+    }).SetupWithManager(ctx, mgr, controller.Options{
+        MaxConcurrentReconciles: concurrencyNumber,
+    }); err != nil {
+        setupLog.Error(err, "unable to create rancher management v3 downgrade controller")
+        os.Exit(1)
+    }
 }

 if feature.Gates.Enabled(feature.RancherKubeSecretPatch) {
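One wiring detail worth noting from patch 3: the downgrade controller is completed with reconcile.AsReconciler, so its Reconcile method receives an already-fetched *managementv3.Cluster rather than a ctrl.Request, and not-found races after deletion are absorbed by the adapter. A minimal sketch of the same shape; demoReconciler and the ConfigMap type are illustrative:

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// demoReconciler implements reconcile.ObjectReconciler[*corev1.ConfigMap]:
// the adapter fetches the object for the request key and skips the call
// entirely when the object no longer exists.
type demoReconciler struct{}

func (r *demoReconciler) Reconcile(ctx context.Context, cm *corev1.ConfigMap) (ctrl.Result, error) {
    fmt.Println("reconciling", cm.GetName())
    return ctrl.Result{}, nil
}

// asStandardReconciler shows the conversion used in SetupWithManager above;
// in the real controller the result is passed to Complete().
func asStandardReconciler(c client.Client) reconcile.Reconciler {
    return reconcile.AsReconciler(c, &demoReconciler{})
}

func main() {
    _ = asStandardReconciler
    fmt.Println("wired via ctrl.NewControllerManagedBy(...).Complete(reconcile.AsReconciler(client, r))")
}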