diff --git a/Makefile b/Makefile index 334ddfed..3a9958d7 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,7 @@ ARCH ?= amd64 OS ?= $(shell uname -s | tr A-Z a-z) K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) export CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME) -TAG ?= main +TAG ?= dev # Get cluster-api version and build ldflags clusterapi := $(shell go list -m sigs.k8s.io/cluster-api) @@ -220,6 +220,9 @@ create-cluster: $(KIND) $(CLUSTERCTL) $(KUBECTL) $(ENVSUBST) ## Create a new kin @echo wait for calico pod $(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig wait --for=condition=Available deployment/calico-kube-controllers -n kube-system --timeout=$(TIMEOUT) + @echo apply reloader CRD to managed cluster + $(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig apply -f https://raw.githubusercontent.com/projectsveltos/libsveltos/$(TAG)/config/crd/bases/lib.projectsveltos.io_reloaders.yaml + .PHONY: delete-cluster delete-cluster: $(KIND) ## Deletes the kind cluster $(CONTROL_CLUSTER_NAME) $(KIND) delete cluster --name $(CONTROL_CLUSTER_NAME) diff --git a/api/v1alpha1/clusterprofile_types.go b/api/v1alpha1/clusterprofile_types.go index d10b24da..573ad1af 100644 --- a/api/v1alpha1/clusterprofile_types.go +++ b/api/v1alpha1/clusterprofile_types.go @@ -251,6 +251,15 @@ type ClusterProfileSpec struct { // +optional StopMatchingBehavior StopMatchingBehavior `json:"stopMatchingBehavior,omitempty"` + // Reloader indicates whether Deployment/StatefulSet/DaemonSet instances deployed + // by Sveltos and part of this ClusterProfile need to be restarted via rolling upgrade + // when a ConfigMap/Secret instance mounted as volume is modified. + // When set to true, when any mounted ConfigMap/Secret is modified, Sveltos automatically + // starts a rolling upgrade for Deployment/StatefulSet/DaemonSet instances mounting it. 
+ // +kubebuilder:default:=false + // +optional + Reloader bool `json:"reloader,omitempty"` + // TemplateResourceRefs is a list of resource to collect from the management cluster. // Those resources' values will be used to instantiate templates contained in referenced // PolicyRefs and Helm charts diff --git a/config/crd/bases/config.projectsveltos.io_clusterprofiles.yaml b/config/crd/bases/config.projectsveltos.io_clusterprofiles.yaml index 637b13e1..5306b956 100644 --- a/config/crd/bases/config.projectsveltos.io_clusterprofiles.yaml +++ b/config/crd/bases/config.projectsveltos.io_clusterprofiles.yaml @@ -246,6 +246,16 @@ spec: - namespace type: object type: array + reloader: + default: false + description: Reloader indicates whether Deployment/StatefulSet/DaemonSet + instances deployed by Sveltos and part of this ClusterProfile need + to be restarted via rolling upgrade when a ConfigMap/Secret instance + mounted as volume is modified. When set to true, when any mounted + ConfigMap/Secret is modified, Sveltos automatically starts a rolling + upgrade for Deployment/StatefulSet/DaemonSet instances mounting + it. + type: boolean stopMatchingBehavior: default: WithdrawPolicies description: StopMatchingBehavior indicates what behavior should be diff --git a/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml b/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml index d5e3acb0..b4007f90 100644 --- a/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml +++ b/config/crd/bases/config.projectsveltos.io_clustersummaries.yaml @@ -262,6 +262,16 @@ spec: - namespace type: object type: array + reloader: + default: false + description: Reloader indicates whether Deployment/StatefulSet/DaemonSet + instances deployed by Sveltos and part of this ClusterProfile + need to be restarted via rolling upgrade when a ConfigMap/Secret + instance mounted as volume is modified. 
When set to true, when + any mounted ConfigMap/Secret is modified, Sveltos automatically + starts a rolling upgrade for Deployment/StatefulSet/DaemonSet + instances mounting it. + type: boolean stopMatchingBehavior: default: WithdrawPolicies description: StopMatchingBehavior indicates what behavior should diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index b0e3a6ce..69525b25 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: projectsveltos/addon-controller-amd64:main + - image: projectsveltos/addon-controller-amd64:dev name: controller diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go index 24546d89..bdcdcbfb 100644 --- a/controllers/controllers_suite_test.go +++ b/controllers/controllers_suite_test.go @@ -114,6 +114,12 @@ var _ = BeforeSuite(func() { Expect(testEnv.Create(context.TODO(), addonComplianceCRD)).To(Succeed()) Expect(waitForObject(context.TODO(), testEnv, addonComplianceCRD)).To(Succeed()) + var reloaderCRD *unstructured.Unstructured + reloaderCRD, err = utils.GetUnstructured(libsveltoscrd.GetReloaderCRDYAML()) + Expect(err).To(BeNil()) + Expect(testEnv.Create(context.TODO(), reloaderCRD)).To(Succeed()) + Expect(waitForObject(context.TODO(), testEnv, reloaderCRD)).To(Succeed()) + // Wait for synchronization // Sometimes we otherwise get "no matches for kind "AddonCompliance" in version "lib.projectsveltos.io/v1alpha1" time.Sleep(2 * time.Second) diff --git a/controllers/export_test.go b/controllers/export_test.go index 0740d0ef..f0bf32a1 100644 --- a/controllers/export_test.go +++ b/controllers/export_test.go @@ -133,3 +133,14 @@ var ( RunLuaValidations = runLuaValidations LuaValidation = luaValidation ) + +// reloader utils +var ( + WatchForRollingUpgrade = watchForRollingUpgrade + 
CreateReloaderInstance = createReloaderInstance
+	DeployReloaderInstance = deployReloaderInstance
+	RemoveReloaderInstance = removeReloaderInstance
+	UpdateReloaderWithDeployedResources = updateReloaderWithDeployedResources
+	ConvertResourceReportsToObjectReference = convertResourceReportsToObjectReference
+	ConvertHelmResourcesToObjectReference = convertHelmResourcesToObjectReference
+)
diff --git a/controllers/handlers_helm.go b/controllers/handlers_helm.go
index c8374c7e..af34e3fb 100644
--- a/controllers/handlers_helm.go
+++ b/controllers/handlers_helm.go
@@ -129,15 +129,42 @@ func deployHelmCharts(ctx context.Context, c client.Client,
 		return err
 	}
 
+	var helmResources []libsveltosv1alpha1.HelmResources
+	if clusterSummary.Spec.ClusterProfileSpec.SyncMode == configv1alpha1.SyncModeContinuousWithDriftDetection ||
+		clusterSummary.Spec.ClusterProfileSpec.Reloader {
+
+		helmResources, err = collectResourcesFromManagedHelmCharts(ctx, c, clusterSummary, kubeconfig, logger)
+		if err != nil {
+			return err
+		}
+	}
+
 	if clusterSummary.Spec.ClusterProfileSpec.SyncMode == configv1alpha1.SyncModeContinuousWithDriftDetection {
 		// Deploy resourceSummary
-		err = deployResourceSummaryWithHelmResources(ctx, c, clusterNamespace, clusterName,
-			clusterType, clusterSummary, kubeconfig, logger)
+		err = deployResourceSummaryInCluster(ctx, c, clusterNamespace, clusterName, clusterSummary.Name,
+			clusterType, nil, nil, helmResources, logger)
 		if err != nil {
-			return err
+			return err
 		}
 	}
 
+	clusterProfileOwnerRef, err := configv1alpha1.GetClusterProfileOwnerReference(clusterSummary)
+	if err != nil {
+		return err
+	}
+
+	// Update Reloader instance. If ClusterProfile Reloader knob is set to true, sveltos will
+	// start a rolling upgrade for all Deployment/StatefulSet/DaemonSet instances deployed by Sveltos
+	// in the managed cluster when a mounted ConfigMap/Secret is updated. 
In order to do so, sveltos-agent
+	// needs to be instructed which Deployment/StatefulSet/DaemonSet instances require this behavior.
+	// Update corresponding Reloader instance (instance will be deleted if Reloader is set to false)
+	resources := convertHelmResourcesToObjectReference(helmResources)
+	err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureHelm,
+		resources, clusterSummary, logger)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
 
@@ -161,6 +188,8 @@ func undeployHelmCharts(ctx context.Context, c client.Client,
 	logger = logger.WithValues("clusterSummary", clusterSummary.Name)
 	logger = logger.WithValues("admin", fmt.Sprintf("%s/%s", adminNamespace, adminName))
 
+	logger.V(logs.LogDebug).Info("undeployHelmCharts")
+
 	kubeconfigContent, err := clusterproxy.GetSecretData(ctx, c, clusterNamespace, clusterName,
 		adminNamespace, adminName, clusterSummary.Spec.ClusterType, logger)
 	if err != nil {
@@ -194,6 +223,13 @@ func undeployHelmCharts(ctx context.Context, c client.Client,
 	if err != nil {
 		return err
 	}
+
+	err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureHelm,
+		nil, clusterSummary, logger)
+	if err != nil {
+		return err
+	}
+
 	err = updateClusterConfiguration(ctx, c, clusterSummary, clusterProfileOwnerRef,
 		configv1alpha1.FeatureHelm, nil, []configv1alpha1.Chart{})
 	if err != nil {
@@ -1393,13 +1429,16 @@ func getInstantiatedValues(ctx context.Context, clusterSummary *configv1alpha1.C
 	return chartutil.ReadValues([]byte(instantiatedValues))
 }
 
-func deployResourceSummaryWithHelmResources(ctx context.Context, c client.Client,
-	clusterNamespace, clusterName string, clusterType libsveltosv1alpha1.ClusterType,
-	clusterSummary *configv1alpha1.ClusterSummary, kubeconfig string, logger logr.Logger) error {
+// collectResourcesFromManagedHelmCharts collects resources considering all
+// helm charts contained in a ClusterSummary that are currently managed by the
+// 
ClusterProfile instance +func collectResourcesFromManagedHelmCharts(ctx context.Context, c client.Client, + clusterSummary *configv1alpha1.ClusterSummary, kubeconfig string, logger logr.Logger, +) ([]libsveltosv1alpha1.HelmResources, error) { chartManager, err := chartmanager.GetChartManagerInstance(ctx, c) if err != nil { - return err + return nil, err } helmResources := make([]libsveltosv1alpha1.HelmResources, 0) @@ -1412,18 +1451,18 @@ func deployResourceSummaryWithHelmResources(ctx context.Context, c client.Client actionConfig, err := actionConfigInit(currentChart.ReleaseNamespace, kubeconfig, logger) if err != nil { - return err + return nil, err } statusObject := action.NewStatus(actionConfig) results, err := statusObject.Run(currentChart.ReleaseName) if err != nil { - return err + return nil, err } resources, err := collectHelmContent(results.Manifest, logger) if err != nil { - return err + return nil, err } l.V(logs.LogDebug).Info(fmt.Sprintf("found %d resources", len(resources))) @@ -1439,8 +1478,7 @@ func deployResourceSummaryWithHelmResources(ctx context.Context, c client.Client } } - return deployResourceSummaryInCluster(ctx, c, clusterNamespace, clusterName, clusterSummary.Name, - clusterType, nil, nil, helmResources, logger) + return helmResources, nil } func collectHelmContent(manifest string, logger logr.Logger) ([]*unstructured.Unstructured, error) { diff --git a/controllers/handlers_kustomize.go b/controllers/handlers_kustomize.go index c1be6def..9969fced 100644 --- a/controllers/handlers_kustomize.go +++ b/controllers/handlers_kustomize.go @@ -103,6 +103,12 @@ func deployKustomizeRefs(ctx context.Context, c client.Client, if err != nil { return err } + remoteResources := convertResourceReportsToObjectReference(remoteResourceReports) + err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureKustomize, + remoteResources, clusterSummary, logger) + if err != nil { + return err + } // If we are here there are no 
conflicts (and error would have been returned by deployKustomizeRef) remoteDeployed := make([]configv1alpha1.Resource, 0) @@ -116,15 +122,9 @@ func deployKustomizeRefs(ctx context.Context, c client.Client, return err } - // Clean stale resources in the management cluster - _, err = cleanKustomizeResources(ctx, getManagementClusterConfig(), c, clusterSummary, localResourceReports, logger) - if err != nil { - return err - } - - // Clean stale resources in the remote cluster var undeployed []configv1alpha1.ResourceReport - undeployed, err = cleanKustomizeResources(ctx, remoteRestConfig, remoteClient, clusterSummary, remoteResourceReports, logger) + _, undeployed, err = cleanStaleKustomizeResources(ctx, remoteRestConfig, remoteClient, clusterSummary, + localResourceReports, remoteResourceReports, logger) if err != nil { return err } @@ -152,6 +152,26 @@ func deployKustomizeRefs(ctx context.Context, c client.Client, return nil } +func cleanStaleKustomizeResources(ctx context.Context, remoteRestConfig *rest.Config, remoteClient client.Client, + clusterSummary *configv1alpha1.ClusterSummary, localResourceReports, remoteResourceReports []configv1alpha1.ResourceReport, + logger logr.Logger) (localUndeployed, remoteUndeployed []configv1alpha1.ResourceReport, err error) { + // Clean stale resources in the management cluster + localUndeployed, err = cleanKustomizeResources(ctx, getManagementClusterConfig(), getManagementClusterClient(), + clusterSummary, localResourceReports, logger) + if err != nil { + return + } + + // Clean stale resources in the remote cluster + remoteUndeployed, err = cleanKustomizeResources(ctx, remoteRestConfig, remoteClient, + clusterSummary, remoteResourceReports, logger) + if err != nil { + return + } + + return +} + func undeployKustomizeRefs(ctx context.Context, c client.Client, clusterNamespace, clusterName, applicant, _ string, clusterType libsveltosv1alpha1.ClusterType, @@ -207,6 +227,12 @@ func undeployKustomizeRefs(ctx context.Context, c 
client.Client, return err } + err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureKustomize, + nil, clusterSummary, logger) + if err != nil { + return err + } + err = updateClusterConfiguration(ctx, c, clusterSummary, clusterProfileOwnerRef, configv1alpha1.FeatureKustomize, []configv1alpha1.Resource{}, nil) if err != nil { diff --git a/controllers/handlers_resources.go b/controllers/handlers_resources.go index 667c0e70..6403704d 100644 --- a/controllers/handlers_resources.go +++ b/controllers/handlers_resources.go @@ -88,6 +88,13 @@ func deployResources(ctx context.Context, c client.Client, return err } + remoteResources := convertResourceReportsToObjectReference(remoteResourceReports) + err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureResources, + remoteResources, clusterSummary, logger) + if err != nil { + return err + } + // If we are here there are no conflicts (and error would have been returned by deployPolicyRefs) remoteDeployed := make([]configv1alpha1.Resource, 0) for i := range remoteResourceReports { @@ -100,15 +107,9 @@ func deployResources(ctx context.Context, c client.Client, return err } - // Clean stale resources in the management cluster - _, err = cleanPolicyRefResources(ctx, getManagementClusterConfig(), c, clusterSummary, localResourceReports, logger) - if err != nil { - return err - } - - // Clean stale resources in the remote cluster var undeployed []configv1alpha1.ResourceReport - undeployed, err = cleanPolicyRefResources(ctx, remoteRestConfig, remoteClient, clusterSummary, remoteResourceReports, logger) + _, undeployed, err = cleanStaleResources(ctx, remoteRestConfig, remoteClient, clusterSummary, + localResourceReports, remoteResourceReports, logger) if err != nil { return err } @@ -136,6 +137,27 @@ func deployResources(ctx context.Context, c client.Client, return nil } +func cleanStaleResources(ctx context.Context, remoteRestConfig *rest.Config, 
remoteClient client.Client,
+	clusterSummary *configv1alpha1.ClusterSummary, localResourceReports, remoteResourceReports []configv1alpha1.ResourceReport,
+	logger logr.Logger) (localUndeployed, remoteUndeployed []configv1alpha1.ResourceReport, err error) {
+
+	// Clean stale resources in the management cluster
+	localUndeployed, err = cleanPolicyRefResources(ctx, getManagementClusterConfig(), getManagementClusterClient(),
+		clusterSummary, localResourceReports, logger)
+	if err != nil {
+		return
+	}
+
+	// Clean stale resources in the remote cluster
+	remoteUndeployed, err = cleanPolicyRefResources(ctx, remoteRestConfig, remoteClient, clusterSummary,
+		remoteResourceReports, logger)
+	if err != nil {
+		return
+	}
+
+	return
+}
+
 // handleDriftDetectionManagerDeployment deploys, if sync mode is SyncModeContinuousWithDriftDetection,
 // drift-detection-manager in the managed clyuster
 func handleDriftDetectionManagerDeployment(ctx context.Context, clusterSummary *configv1alpha1.ClusterSummary,
@@ -268,6 +290,12 @@ func undeployResources(ctx context.Context, c client.Client,
 		return err
 	}
 
+	err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureResources,
+		nil, clusterSummary, logger)
+	if err != nil {
+		return err
+	}
+
 	err = updateClusterConfiguration(ctx, c, clusterSummary, clusterProfileOwnerRef,
 		configv1alpha1.FeatureResources, []configv1alpha1.Resource{}, nil)
 	if err != nil {
@@ -403,6 +431,8 @@ func deployPolicyRefs(ctx context.Context, c client.Client, remoteConfig *rest.C
 	var objectsToDeployLocally []client.Object
 	var objectsToDeployRemotely []client.Object
 
+	// collect all referenced ConfigMaps/Secrets whose content need to be deployed
+	// in the management cluster (local) or managed cluster (remote)
 	objectsToDeployLocally, objectsToDeployRemotely, err = collectReferencedObjects(ctx, c,
 		clusterSummary.Namespace, refs, logger)
 	if err != nil {
diff --git a/controllers/handlers_utils.go b/controllers/handlers_utils.go
index bd8409fc..f068723e 100644 --- a/controllers/handlers_utils.go +++ b/controllers/handlers_utils.go @@ -592,7 +592,11 @@ func collectReferencedObjects(ctx context.Context, controlClusterClient client.C return local, remote, nil } -// deployReferencedObjects deploys in a CAPI Cluster the policies contained in the Data section of each passed ConfigMap +// deployReferencedObjects deploys in a managed Cluster the resources contained in each referenced ConfigMap +// - objectsToDeployLocally is a list of ConfigMaps/Secrets whose content need to be deployed +// in the management cluster +// - objectsToDeployRemotely is a list of ConfigMaps/Secrets whose content need to be deployed +// in the managed cluster func deployReferencedObjects(ctx context.Context, c client.Client, remoteConfig *rest.Config, clusterSummary *configv1alpha1.ClusterSummary, objectsToDeployLocally, objectsToDeployRemotely []client.Object, logger logr.Logger) (localReports, remoteReports []configv1alpha1.ResourceReport, err error) { @@ -610,8 +614,8 @@ func deployReferencedObjects(ctx context.Context, c client.Client, remoteConfig var tmpResourceReports []configv1alpha1.ResourceReport - // Assume that if objects are deployed in the management clusters, those are needed before any resource is deployed - // in the managed cluster. So try to deploy those first if any. + // Assume that if objects are deployed in the management clusters, those are needed before any + // resource is deployed in the managed cluster. So try to deploy those first if any. localConfig := rest.CopyConfig(getManagementClusterConfig()) adminNamespace, adminName := getClusterSummaryAdmin(clusterSummary) diff --git a/controllers/reloader_utils.go b/controllers/reloader_utils.go new file mode 100644 index 00000000..f3774d0f --- /dev/null +++ b/controllers/reloader_utils.go @@ -0,0 +1,226 @@ +/* +Copyright 2022-23. projectsveltos.io. All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "crypto/sha256" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2/klogr" + "sigs.k8s.io/controller-runtime/pkg/client" + + configv1alpha1 "github.com/projectsveltos/addon-controller/api/v1alpha1" + libsveltosv1alpha1 "github.com/projectsveltos/libsveltos/api/v1alpha1" + "github.com/projectsveltos/libsveltos/lib/clusterproxy" + logs "github.com/projectsveltos/libsveltos/lib/logsettings" +) + +// removeReloaderInstance removes Reloader instance from the managed cluster +func removeReloaderInstance(ctx context.Context, remoteClient client.Client, + clusterProfileName string, feature configv1alpha1.FeatureID, logger logr.Logger) error { + + reloader, err := getReloaderInstance(ctx, remoteClient, clusterProfileName, + feature, klogr.New()) + if err != nil { + return err + } + + if reloader == nil { + return nil + } + + logger = logger.WithValues("reloader", reloader.Name) + logger.V(logs.LogDebug).Info("deleting reloader") + return remoteClient.Delete(ctx, reloader) +} + +// deployReloaderInstance creates/updates Reloader instance to the managed cluster. +// Any Deployment, StatefulSet, DaemonSet instance deployed by Sveltos and mounting either +// a ConfigMap or Secret as volume, need to be reloaded (via rolling upgrade) when mounted +// resources are modified. 
+// Reloader instance contains list of Deployment, StatefulSet, DaemonSet instances sveltos-agent needs +// to watch (along with mounted ConfigMaps/Secrets) to detect when is time to trigger a rolling upgrade. +func deployReloaderInstance(ctx context.Context, remoteClient client.Client, + clusterProfileName string, feature configv1alpha1.FeatureID, resources []corev1.ObjectReference, + logger logr.Logger) error { + + reloaderInfo := make([]libsveltosv1alpha1.ReloaderInfo, 0) + for i := range resources { + resource := &resources[i] + if watchForRollingUpgrade(resource) { + reloaderInfo = append(reloaderInfo, + libsveltosv1alpha1.ReloaderInfo{ + Namespace: resource.Namespace, + Name: resource.Name, + Kind: resource.Kind, + }) + } + } + + reloader, err := getReloaderInstance(ctx, remoteClient, clusterProfileName, feature, logger) + if err != nil { + logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get reloader instance: %v", err)) + return err + } + + if reloader == nil { + // Reloader is not present in the managed cluster + return createReloaderInstance(ctx, remoteClient, clusterProfileName, feature, reloaderInfo) + } + + reloader.Spec.ReloaderInfo = reloaderInfo + return remoteClient.Update(ctx, reloader) +} + +// createReloaderInstance creates Reloader instance to managed cluster. +func createReloaderInstance(ctx context.Context, remoteClient client.Client, clusterProfileName string, + feature configv1alpha1.FeatureID, reloaderInfo []libsveltosv1alpha1.ReloaderInfo) error { + + h := sha256.New() + fmt.Fprintf(h, "%s--%s", clusterProfileName, feature) + hash := h.Sum(nil) + reloader := &libsveltosv1alpha1.Reloader{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%x", hash), + Labels: getReloaderLabels(clusterProfileName, feature), + }, + Spec: libsveltosv1alpha1.ReloaderSpec{ + ReloaderInfo: reloaderInfo, + }, + } + + return remoteClient.Create(ctx, reloader) +} + +// getReloaderInstance returns ReloaderInstance if present in the managed cluster. 
+func getReloaderInstance(ctx context.Context, remoteClient client.Client, clusterProfileName string, + feature configv1alpha1.FeatureID, logger logr.Logger) (*libsveltosv1alpha1.Reloader, error) { + + reloaders := &libsveltosv1alpha1.ReloaderList{} + listOptions := []client.ListOption{ + client.MatchingLabels(getReloaderLabels(clusterProfileName, feature)), + } + + err := remoteClient.List(ctx, reloaders, listOptions...) + if err != nil { + logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to list Reloaders: %v", err)) + return nil, err + } + + switch len(reloaders.Items) { + case 0: + return nil, nil + case 1: + return &reloaders.Items[0], nil + default: + return nil, fmt.Errorf("found %d matches", len(reloaders.Items)) + } +} + +// getReloaderLabels returns labels a Reloader instance has in a managed cluster +func getReloaderLabels(clusterProfileName string, feature configv1alpha1.FeatureID) map[string]string { + return map[string]string{ + "clusterprofile": clusterProfileName, + "feature": string(feature), + } +} + +// watchForRollingUpgrade returns true if the resource should be watched for rolling upgrades +func watchForRollingUpgrade(resource *corev1.ObjectReference) bool { + switch resource.Kind { + case "Deployment": + return true + case "StatefulSet": + return true + case "DaemonSet": + return true + default: + return false + } +} + +// updateReloaderWithDeployedResources updates corresponding Reloader instance in the +// managed cluster. +// Reload indicates whether reloader instance needs to be removed, which can happen +// because ClusterSummary is being deleted or ClusterProfile.Spec.Reloader is set to false. +func updateReloaderWithDeployedResources(ctx context.Context, c client.Client, + clusterProfileOwnerRef *metav1.OwnerReference, feature configv1alpha1.FeatureID, + resources []corev1.ObjectReference, clusterSummary *configv1alpha1.ClusterSummary, + logger logr.Logger) error { + + // Ignore admin. Deploying Reloaders must be done as Sveltos. 
+ // There is no need to ask tenant to be granted Reloader permissions + remoteClient, err := clusterproxy.GetKubernetesClient(ctx, c, clusterSummary.Spec.ClusterNamespace, + clusterSummary.Spec.ClusterName, "", "", clusterSummary.Spec.ClusterType, logger) + if err != nil { + return err + } + + // if ClusterSummary is being deleted or Reloader knob is not set, clean Reloader + if !clusterSummary.DeletionTimestamp.IsZero() || + !clusterSummary.Spec.ClusterProfileSpec.Reloader { + + return removeReloaderInstance(ctx, remoteClient, clusterProfileOwnerRef.Name, + feature, logger) + } + + return deployReloaderInstance(ctx, remoteClient, clusterProfileOwnerRef.Name, + feature, resources, logger) +} + +// convertResourceReportsToObjectReference converts a slice of ResourceReports to +// a slice of ObjectReference +func convertResourceReportsToObjectReference(resourceReports []configv1alpha1.ResourceReport, +) []corev1.ObjectReference { + + resources := make([]corev1.ObjectReference, len(resourceReports)) + + for i := range resourceReports { + rr := &resourceReports[i] + resources[i] = corev1.ObjectReference{ + Kind: rr.Resource.Kind, + Namespace: rr.Resource.Namespace, + Name: rr.Resource.Name, + } + } + + return resources +} + +// convertHelmResourcesToObjectReference converts a slice of HelmResources to +// a slice of ObjectReference +func convertHelmResourcesToObjectReference(helmResources []libsveltosv1alpha1.HelmResources, +) []corev1.ObjectReference { + + resources := make([]corev1.ObjectReference, 0) + + for i := range helmResources { + for j := range helmResources[i].Resources { + resources = append(resources, corev1.ObjectReference{ + Kind: helmResources[i].Resources[j].Kind, + Namespace: helmResources[i].Resources[j].Namespace, + Name: helmResources[i].Resources[j].Name, + }) + } + } + + return resources +} diff --git a/controllers/reloader_utils_test.go b/controllers/reloader_utils_test.go new file mode 100644 index 00000000..97ecc51b --- /dev/null +++ 
b/controllers/reloader_utils_test.go @@ -0,0 +1,326 @@ +/* +Copyright 2023. projectsveltos.io. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers_test + +import ( + "context" + "fmt" + "reflect" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2/klogr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + configv1alpha1 "github.com/projectsveltos/addon-controller/api/v1alpha1" + "github.com/projectsveltos/addon-controller/controllers" + libsveltosv1alpha1 "github.com/projectsveltos/libsveltos/api/v1alpha1" +) + +var _ = Describe("Reloader utils", func() { + It("watchForRollingUpgrade returns true only for Deployment/StatefulSet/DaemonSet", func() { + type resourceData struct { + resource *corev1.ObjectReference + result bool + } + + testData := []resourceData{ + { + resource: &corev1.ObjectReference{Kind: "Deployment", Namespace: randomString(), Name: randomString()}, + result: true, + }, + { + resource: &corev1.ObjectReference{Kind: "StatefulSet", Namespace: randomString(), Name: randomString()}, + result: true, + }, + { + resource: &corev1.ObjectReference{Kind: "DaemonSet", Namespace: randomString(), Name: randomString()}, + result: true, + }, + { + resource: &corev1.ObjectReference{Kind: randomString(), Namespace: randomString(), Name: randomString()}, + result: false, + }, + } + + for i := range 
testData { + Expect(controllers.WatchForRollingUpgrade(testData[i].resource)).To( + Equal(testData[i].result), fmt.Sprintf("resource %s", testData[i].resource.Kind)) + } + }) + + It("createReloaderInstance creates reloader instance", func() { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + reloaderInfo := []libsveltosv1alpha1.ReloaderInfo{ + {Kind: "Deployment", Namespace: randomString(), Name: randomString()}, + {Kind: "Deployment", Namespace: randomString(), Name: randomString()}, + } + + clusterProfileName := randomString() + feature := configv1alpha1.FeatureHelm + Expect(controllers.CreateReloaderInstance(context.TODO(), c, + clusterProfileName, feature, reloaderInfo)).To(Succeed()) + + reloaders := &libsveltosv1alpha1.ReloaderList{} + Expect(c.List(context.TODO(), reloaders)).To(Succeed()) + Expect(len(reloaders.Items)).To(Equal(1)) + Expect(len(reloaders.Items[0].Labels)).ToNot(BeNil()) + Expect(reflect.DeepEqual(reloaders.Items[0].Spec.ReloaderInfo, reloaderInfo)).To(BeTrue()) + }) + + It("deployReloaderInstance creates/updates reloader instance", func() { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + resources := []corev1.ObjectReference{ + {Kind: "Deployment", Namespace: randomString(), Name: randomString()}, + {Kind: "StatefulSet", Namespace: randomString(), Name: randomString()}, + {Kind: "DaemonSet", Namespace: randomString(), Name: randomString()}, + } + + clusterProfileName := randomString() + feature := configv1alpha1.FeatureHelm + Expect(controllers.DeployReloaderInstance(context.TODO(), c, + clusterProfileName, feature, resources, klogr.New())).To(Succeed()) + + reloaders := &libsveltosv1alpha1.ReloaderList{} + Expect(c.List(context.TODO(), reloaders)).To(Succeed()) + Expect(len(reloaders.Items)).To(Equal(1)) + Expect(len(reloaders.Items[0].Labels)).ToNot(BeNil()) + + Expect(len(reloaders.Items[0].Spec.ReloaderInfo)).To(Equal(len(resources))) + for i := range resources { + 
Expect(reloaders.Items[0].Spec.ReloaderInfo).To(ContainElement( + libsveltosv1alpha1.ReloaderInfo{ + Kind: resources[i].Kind, + Namespace: resources[i].Namespace, + Name: resources[i].Name, + })) + } + + resources = []corev1.ObjectReference{ + {Kind: "Deployment", Namespace: randomString(), Name: randomString()}, + {Kind: "Deployment", Namespace: randomString(), Name: randomString()}, + {Kind: "StatefulSet", Namespace: randomString(), Name: randomString()}, + {Kind: "DaemonSet", Namespace: randomString(), Name: randomString()}, + } + + // Reloader Spec.ReloaderInfo is updated now + Expect(controllers.DeployReloaderInstance(context.TODO(), c, + clusterProfileName, feature, resources, klogr.New())).To(Succeed()) + + Expect(c.List(context.TODO(), reloaders)).To(Succeed()) + Expect(len(reloaders.Items)).To(Equal(1)) + Expect(len(reloaders.Items[0].Labels)).ToNot(BeNil()) + + Expect(len(reloaders.Items[0].Spec.ReloaderInfo)).To(Equal(len(resources))) + for i := range resources { + Expect(reloaders.Items[0].Spec.ReloaderInfo).To(ContainElement( + libsveltosv1alpha1.ReloaderInfo{ + Kind: resources[i].Kind, + Namespace: resources[i].Namespace, + Name: resources[i].Name, + })) + } + }) + + It("removeReloaderInstance returns no error when Reloader instance does not exist", func() { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + Expect(controllers.RemoveReloaderInstance(context.TODO(), c, randomString(), + configv1alpha1.FeatureKustomize, klogr.New())).To(BeNil()) + }) + + It("removeReloaderInstance removes Reloader instance", func() { + clusterProfileName := randomString() + feature := configv1alpha1.FeatureKustomize + + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + Expect(controllers.CreateReloaderInstance(context.TODO(), c, + clusterProfileName, feature, nil)).To(Succeed()) + reloaders := &libsveltosv1alpha1.ReloaderList{} + Expect(c.List(context.TODO(), reloaders)).To(Succeed()) + Expect(len(reloaders.Items)).To(Equal(1)) + + 
Expect(controllers.RemoveReloaderInstance(context.TODO(), c, clusterProfileName, + feature, klogr.New())).To(BeNil()) + + Expect(c.List(context.TODO(), reloaders)).To(Succeed()) + Expect(len(reloaders.Items)).To(Equal(0)) + }) + + It("updateReloaderWithDeployedResources creates reloader instance", func() { + resources := []corev1.ObjectReference{ + { + Kind: "Deployment", + Name: randomString(), + Namespace: randomString(), + }, + { + Kind: "DaemonSet", + Name: randomString(), + Namespace: randomString(), + }, + } + + // Creates cluster and Secret with kubeconfig to access it + // This is needed as updateReloaderWithDeployedResources fetches the + // Secret containing the Kubeconfig to access the cluster + cluster := prepareCluster() + + clusterProfileOwner := &metav1.OwnerReference{ + Kind: configv1alpha1.ClusterProfileKind, + APIVersion: configv1alpha1.GroupVersion.String(), + Name: randomString(), + UID: types.UID(randomString()), + } + + clusterSummary := &configv1alpha1.ClusterSummary{ + ObjectMeta: metav1.ObjectMeta{ + Name: randomString(), + Namespace: randomString(), + }, + Spec: configv1alpha1.ClusterSummarySpec{ + ClusterNamespace: cluster.Namespace, + ClusterName: cluster.Name, + ClusterType: libsveltosv1alpha1.ClusterTypeCapi, + ClusterProfileSpec: configv1alpha1.ClusterProfileSpec{ + Reloader: true, + }, + }, + } + + Expect(controllers.UpdateReloaderWithDeployedResources(context.TODO(), testEnv.Client, clusterProfileOwner, + configv1alpha1.FeatureResources, resources, clusterSummary, klogr.New())).To(Succeed()) + + reloaders := &libsveltosv1alpha1.ReloaderList{} + + Eventually(func() bool { + err := testEnv.Client.List(context.TODO(), reloaders) + return err == nil && len(reloaders.Items) == 1 + }, timeout, pollingInterval).Should(BeTrue()) + + Expect(testEnv.Client.List(context.TODO(), reloaders)).To(Succeed()) + + for i := range resources { + Expect(reloaders.Items[0].Spec.ReloaderInfo).To(ContainElement( + libsveltosv1alpha1.ReloaderInfo{ + Kind: 
resources[i].Kind, + Namespace: resources[i].Namespace, + Name: resources[i].Name, + }, + )) + } + + clusterSummary.Spec.ClusterProfileSpec.Reloader = false + + Expect(controllers.UpdateReloaderWithDeployedResources(context.TODO(), testEnv.Client, clusterProfileOwner, + configv1alpha1.FeatureResources, nil, clusterSummary, klogr.New())).To(Succeed()) + + Eventually(func() bool { + err := testEnv.Client.List(context.TODO(), reloaders) + return err == nil && len(reloaders.Items) == 0 + }, timeout, pollingInterval).Should(BeTrue()) + }) + + It("convertResourceReportsToObjectReference converts ResourceReports to ObjectReference", func() { + resourceReports := []configv1alpha1.ResourceReport{ + { + Resource: configv1alpha1.Resource{ + Kind: "StatefulSet", + Name: randomString(), + Namespace: randomString(), + }, + }, + { + Resource: configv1alpha1.Resource{ + Kind: "DaemonSet", + Name: randomString(), + Namespace: randomString(), + }, + }, + { + Resource: configv1alpha1.Resource{ + Kind: "Deployment", + Name: randomString(), + Namespace: randomString(), + }, + }, + } + + resources := controllers.ConvertResourceReportsToObjectReference(resourceReports) + Expect(len(resources)).To(Equal(len(resourceReports))) + + for i := range resourceReports { + Expect(resources).To(ContainElement(corev1.ObjectReference{ + Kind: resourceReports[i].Resource.Kind, + Namespace: resourceReports[i].Resource.Namespace, + Name: resourceReports[i].Resource.Name, + })) + } + }) + + It("convertHelmResourcesToObjectReference converts HelmResources to ObjectReference", func() { + resourceReports := []libsveltosv1alpha1.HelmResources{ + { + Resources: []libsveltosv1alpha1.Resource{ + { + Kind: "StatefulSet", + Name: randomString(), + Namespace: randomString(), + }, + { + Kind: "StatefulSet", + Name: randomString(), + Namespace: randomString(), + }, + }, + }, + { + Resources: []libsveltosv1alpha1.Resource{ + { + Kind: "Deployment", + Name: randomString(), + Namespace: randomString(), + }, + { + Kind: 
"DaemonSet", + Name: randomString(), + Namespace: randomString(), + }, + }, + }, + } + + resources := controllers.ConvertHelmResourcesToObjectReference(resourceReports) + + for i := range resourceReports { + for j := range resourceReports[i].Resources { + Expect(resources).To(ContainElement(corev1.ObjectReference{ + Kind: resourceReports[i].Resources[j].Kind, + Namespace: resourceReports[i].Resources[j].Namespace, + Name: resourceReports[i].Resources[j].Name, + })) + } + } + }) +}) diff --git a/go.mod b/go.mod index 240c3fd9..f97a1fdc 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.8 github.com/pkg/errors v0.9.1 - github.com/projectsveltos/libsveltos v0.14.1-0.20230723115557-8c564d7df10c + github.com/projectsveltos/libsveltos v0.14.1-0.20230801071844-911755ff3e1d github.com/prometheus/client_golang v1.16.0 github.com/spf13/pflag v1.0.5 github.com/yuin/gopher-lua v1.1.0 @@ -31,7 +31,7 @@ require ( k8s.io/component-base v0.27.2 k8s.io/klog/v2 v2.90.1 k8s.io/utils v0.0.0-20230505201702-9f6742963106 - sigs.k8s.io/cluster-api v1.5.0-rc.1 + sigs.k8s.io/cluster-api v1.5.0 sigs.k8s.io/controller-runtime v0.15.0 sigs.k8s.io/kustomize/api v0.13.2 sigs.k8s.io/kustomize/kyaml v0.14.1 diff --git a/go.sum b/go.sum index ff82c3b4..d7928c18 100644 --- a/go.sum +++ b/go.sum @@ -557,8 +557,8 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/projectsveltos/libsveltos v0.14.1-0.20230723115557-8c564d7df10c h1:zyAntB59hBysMNnQ5UtkNKMFQQfwPorljoVPkgeieLs= -github.com/projectsveltos/libsveltos v0.14.1-0.20230723115557-8c564d7df10c/go.mod h1:lxPKeFR3wjRVmqmo0UARcs19/QfWdNZJkUkafcyWOdo= 
+github.com/projectsveltos/libsveltos v0.14.1-0.20230801071844-911755ff3e1d h1:caZaXoIcHWkc9NPVxL2xPQlLtb3bZNEMy664+GWooTI= +github.com/projectsveltos/libsveltos v0.14.1-0.20230801071844-911755ff3e1d/go.mod h1:7PaqLwqxsnsG+lV07m4RO4rOg2gKhlmpJVanWcB84JQ= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -1168,8 +1168,8 @@ oras.land/oras-go v1.2.2/go.mod h1:Apa81sKoZPpP7CDciE006tSZ0x3Q3+dOoBcMZ/aNxvw= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/cluster-api v1.5.0-rc.1 h1:pLaRjFlknMWMKzippA+Dm2HXmONubAicSujFhsO2pzo= -sigs.k8s.io/cluster-api v1.5.0-rc.1/go.mod h1:ZSEP01t8oT6104gB4ljsOwwp5uJcI8SWy8IFp2HUvrc= +sigs.k8s.io/cluster-api v1.5.0 h1:pwXvzScbAwnrB7EWHTApzW+VQfrj2OSrWAQDC9+bcbU= +sigs.k8s.io/cluster-api v1.5.0/go.mod h1:ZSEP01t8oT6104gB4ljsOwwp5uJcI8SWy8IFp2HUvrc= sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= diff --git a/hack/tools/go.mod b/hack/tools/go.mod index cf16b163..a7388ef6 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -7,8 +7,8 @@ require ( github.com/onsi/ginkgo/v2 v2.11.0 golang.org/x/oauth2 v0.10.0 k8s.io/client-go v0.27.2 - sigs.k8s.io/cluster-api v1.5.0-rc.1 - sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707165103-87487d3539d7 + sigs.k8s.io/cluster-api v1.5.0 + sigs.k8s.io/controller-runtime/tools/setup-envtest 
v0.0.0-20230728161957-7f0c6dc440f3 sigs.k8s.io/controller-tools v0.12.0 sigs.k8s.io/kind v0.20.0 ) diff --git a/hack/tools/go.sum b/hack/tools/go.sum index a8f1026b..5817adfd 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -973,12 +973,12 @@ k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/cluster-api v1.5.0-rc.1 h1:pLaRjFlknMWMKzippA+Dm2HXmONubAicSujFhsO2pzo= -sigs.k8s.io/cluster-api v1.5.0-rc.1/go.mod h1:ZSEP01t8oT6104gB4ljsOwwp5uJcI8SWy8IFp2HUvrc= +sigs.k8s.io/cluster-api v1.5.0 h1:pwXvzScbAwnrB7EWHTApzW+VQfrj2OSrWAQDC9+bcbU= +sigs.k8s.io/cluster-api v1.5.0/go.mod h1:ZSEP01t8oT6104gB4ljsOwwp5uJcI8SWy8IFp2HUvrc= sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707165103-87487d3539d7 h1:p89i2qkhQgK0phT/jZo0StBx1nC5ptY354nzx5EXmzs= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707165103-87487d3539d7/go.mod h1:B6HLcvOy2S1qq2eWOFm9xepiKPMIc8Z9OXSPsnUDaR4= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230728161957-7f0c6dc440f3 h1:wi7cNi1Fic5lOeboayUElQJhWp3CAwmzeLBdUG0QwOY= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230728161957-7f0c6dc440f3/go.mod h1:B6HLcvOy2S1qq2eWOFm9xepiKPMIc8Z9OXSPsnUDaR4= sigs.k8s.io/controller-tools v0.12.0 h1:TY6CGE6+6hzO7hhJFte65ud3cFmmZW947jajXkuDfBw= sigs.k8s.io/controller-tools v0.12.0/go.mod h1:rXlpTfFHZMpZA8aGq9ejArgZiieHd+fkk/fTatY8A2M= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/manifest/manifest.yaml b/manifest/manifest.yaml index 
12ef87d0..8a956fd8 100644 --- a/manifest/manifest.yaml +++ b/manifest/manifest.yaml @@ -456,6 +456,16 @@ spec: - namespace type: object type: array + reloader: + default: false + description: Reloader indicates whether Deployment/StatefulSet/DaemonSet + instances deployed by Sveltos and part of this ClusterProfile need + to be restarted via rolling upgrade when a ConfigMap/Secret instance + mounted as volume is modified. When set to true, when any mounted + ConfigMap/Secret is modified, Sveltos automatically starts a rolling + upgrade for Deployment/StatefulSet/DaemonSet instances mounting + it. + type: boolean stopMatchingBehavior: default: WithdrawPolicies description: StopMatchingBehavior indicates what behavior should be @@ -1169,6 +1179,16 @@ spec: - namespace type: object type: array + reloader: + default: false + description: Reloader indicates whether Deployment/StatefulSet/DaemonSet + instances deployed by Sveltos and part of this ClusterProfile + need to be restarted via rolling upgrade when a ConfigMap/Secret + instance mounted as volume is modified. When set to true, when + any mounted ConfigMap/Secret is modified, Sveltos automatically + starts a rolling upgrade for Deployment/StatefulSet/DaemonSet + instances mounting it. + type: boolean stopMatchingBehavior: default: WithdrawPolicies description: StopMatchingBehavior indicates what behavior should @@ -1843,7 +1863,7 @@ spec: - --v=5 command: - /manager - image: projectsveltos/addon-controller-amd64:main + image: projectsveltos/addon-controller-amd64:dev livenessProbe: httpGet: path: /healthz diff --git a/test/fv/reloader_test.go b/test/fv/reloader_test.go new file mode 100644 index 00000000..5e564ba9 --- /dev/null +++ b/test/fv/reloader_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2023. projectsveltos.io. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fv_test + +import ( + "context" + "crypto/sha256" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + configv1alpha1 "github.com/projectsveltos/addon-controller/api/v1alpha1" + libsveltosv1alpha1 "github.com/projectsveltos/libsveltos/api/v1alpha1" +) + +const ( + configMap = `apiVersion: v1 +kind: ConfigMap +metadata: + name: example-configmap + namespace: %s +data: + key1: value1 + key2: value2 +` + + deploymentWithVolume = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: %s + namespace: %s +spec: + replicas: 2 + selector: + matchLabels: + app: my-app + template: + metadata: + labels: + app: my-app + spec: + containers: + - name: my-app-container + image: nginx:latest + ports: + - containerPort: 80 + volumeMounts: + - name: config-volume + mountPath: /etc/config + volumes: + - name: config-volume + configMap: + name: example-configmap + +` +) + +var _ = Describe("Reloader", func() { + const ( + namePrefix = "reloader-" + ) + + It("Deploy ClusterProfile with Reloader knob set", Label("FV", "EXTENDED"), func() { + Byf("Create a ClusterProfile with Reloader knob set matching Cluster %s/%s", + kindWorkloadCluster.Namespace, kindWorkloadCluster.Name) + clusterProfile := getClusterProfile(namePrefix, map[string]string{key: value}) + clusterProfile.Spec.SyncMode = configv1alpha1.SyncModeContinuous + clusterProfile.Spec.Reloader = true + Expect(k8sClient.Create(context.TODO(), clusterProfile)).To(Succeed()) + + 
verifyClusterProfileMatches(clusterProfile) + + verifyClusterSummary(clusterProfile, kindWorkloadCluster.Namespace, kindWorkloadCluster.Name) + + ns := randomString() + deploymentName := randomString() + Byf("Create a configMap with a deployment and configmap") + configMap := createConfigMapWithPolicy(defaultNamespace, randomString(), + fmt.Sprintf(configMap, ns), + fmt.Sprintf(deploymentWithVolume, deploymentName, ns)) + Expect(k8sClient.Create(context.TODO(), configMap)).To(Succeed()) + + currentConfigMap := &corev1.ConfigMap{} + Expect(k8sClient.Get(context.TODO(), + types.NamespacedName{Namespace: configMap.Namespace, Name: configMap.Name}, + currentConfigMap)).To(Succeed()) + + Byf("Update ClusterProfile %s to reference ConfigMap %s/%s", + clusterProfile.Name, configMap.Namespace, configMap.Name) + + currentClusterProfile := &configv1alpha1.ClusterProfile{} + Expect(k8sClient.Get(context.TODO(), + types.NamespacedName{Name: clusterProfile.Name}, currentClusterProfile)).To(Succeed()) + currentClusterProfile.Spec.PolicyRefs = []configv1alpha1.PolicyRef{ + { + Kind: string(libsveltosv1alpha1.ConfigMapReferencedResourceKind), + Namespace: configMap.Namespace, + Name: configMap.Name, + }, + } + Expect(k8sClient.Update(context.TODO(), currentClusterProfile)).To(Succeed()) + + clusterSummary := verifyClusterSummary(currentClusterProfile, + kindWorkloadCluster.Namespace, kindWorkloadCluster.Name) + + Byf("Verifying ClusterSummary %s status is set to Deployed for Resources feature", clusterSummary.Name) + verifyFeatureStatusIsProvisioned(kindWorkloadCluster.Namespace, clusterSummary.Name, + configv1alpha1.FeatureResources) + + Byf("Getting client to access the workload cluster") + workloadClient, err := getKindWorkloadClusterKubeconfig() + Expect(err).To(BeNil()) + Expect(workloadClient).ToNot(BeNil()) + + Byf("Verifying Reloader is present in the managed cluster") + currentReloader := &libsveltosv1alpha1.Reloader{} + Expect(workloadClient.Get(context.TODO(), + 
types.NamespacedName{Name: getReloaderName(clusterProfile.Name, configv1alpha1.FeatureResources)}, + currentReloader)).To(Succeed()) + Byf("Verifying Reloader list Deployment") + Expect(len(currentReloader.Spec.ReloaderInfo)).To(Equal(1)) + Expect(currentReloader.Spec.ReloaderInfo).To(ContainElement( + libsveltosv1alpha1.ReloaderInfo{ + Kind: "Deployment", + Namespace: ns, + Name: deploymentName, + }, + )) + + deleteClusterProfile(clusterProfile) + + Byf("Verifying Reloader is removed from the workload cluster") + Eventually(func() bool { + currentReloader := &libsveltosv1alpha1.Reloader{} + err = workloadClient.Get(context.TODO(), + types.NamespacedName{Name: getReloaderName(clusterProfile.Name, configv1alpha1.FeatureResources)}, + currentReloader) + return err != nil && apierrors.IsNotFound(err) + }, timeout, pollingInterval).Should(BeTrue()) + }) +}) + +// getReloaderName returns the Reloader's name +func getReloaderName(clusterProfileName string, + feature configv1alpha1.FeatureID) string { + + h := sha256.New() + fmt.Fprintf(h, "%s--%s", clusterProfileName, feature) + hash := h.Sum(nil) + return fmt.Sprintf("%x", hash) +}