Merge pull request #289 from gianlucam76/reload
Introduce Reloader knob on ClusterProfile
gianlucam76 authored Aug 2, 2023
2 parents 201839a + 3a38b9a commit 9188e12
Showing 19 changed files with 935 additions and 46 deletions.
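
The knob itself is a single boolean on ClusterProfileSpec. A minimal sketch of a ClusterProfile enabling it — the selector and chart values are illustrative; only the reloader field comes from this commit:

apiVersion: config.projectsveltos.io/v1alpha1
kind: ClusterProfile
metadata:
  name: kyverno
spec:
  clusterSelector: env=fv
  # new in this PR: rolling-upgrade Deployment/StatefulSet/DaemonSet instances
  # deployed by Sveltos when a ConfigMap/Secret they mount is modified
  reloader: true
  helmCharts:
  - repositoryURL: https://kyverno.github.io/kyverno/
    repositoryName: kyverno
    chartName: kyverno/kyverno
    chartVersion: v3.0.1
    releaseName: kyverno-latest
    releaseNamespace: kyverno
    helmChartAction: Install
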
5 changes: 4 additions & 1 deletion Makefile
@@ -25,7 +25,7 @@ ARCH ?= amd64
OS ?= $(shell uname -s | tr A-Z a-z)
K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
export CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME)
TAG ?= main
TAG ?= dev

# Get cluster-api version and build ldflags
clusterapi := $(shell go list -m sigs.k8s.io/cluster-api)
@@ -220,6 +220,9 @@ create-cluster: $(KIND) $(CLUSTERCTL) $(KUBECTL) $(ENVSUBST) ## Create a new kin
@echo wait for calico pod
$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig wait --for=condition=Available deployment/calico-kube-controllers -n kube-system --timeout=$(TIMEOUT)

@echo apply reloader CRD to managed cluster
$(KUBECTL) --kubeconfig=./test/fv/workload_kubeconfig apply -f https://raw.githubusercontent.com/projectsveltos/libsveltos/$(TAG)/config/crd/bases/lib.projectsveltos.io_reloaders.yaml

.PHONY: delete-cluster
delete-cluster: $(KIND) ## Deletes the kind cluster $(CONTROL_CLUSTER_NAME)
$(KIND) delete cluster --name $(CONTROL_CLUSTER_NAME)
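
The reloaders CRD applied above comes from libsveltos: Reloader instances are what sveltos-agent consumes in the managed cluster to learn which workloads need a rolling upgrade. A rough sketch of such an instance — the spec layout is an assumption, not part of this diff; consult lib.projectsveltos.io_reloaders.yaml for the actual schema:

apiVersion: lib.projectsveltos.io/v1alpha1
kind: Reloader
metadata:
  name: helm-kyverno # hypothetical name
spec:
  reloaderInfo: # assumed field name
  - kind: Deployment
    namespace: kyverno
    name: kyverno-latest
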
9 changes: 9 additions & 0 deletions api/v1alpha1/clusterprofile_types.go
@@ -251,6 +251,15 @@ type ClusterProfileSpec struct {
// +optional
StopMatchingBehavior StopMatchingBehavior `json:"stopMatchingBehavior,omitempty"`

// Reloader indicates whether Deployment/StatefulSet/DaemonSet instances deployed
// by Sveltos and part of this ClusterProfile need to be restarted via rolling upgrade
// when a ConfigMap/Secret instance mounted as a volume is modified.
// When set to true, if any mounted ConfigMap/Secret is modified, Sveltos automatically
// starts a rolling upgrade for the Deployment/StatefulSet/DaemonSet instances mounting it.
// +kubebuilder:default:=false
// +optional
Reloader bool `json:"reloader,omitempty"`

// TemplateResourceRefs is a list of resources to collect from the management cluster.
// Those resources' values will be used to instantiate templates contained in referenced
// PolicyRefs and Helm charts
10 changes: 10 additions & 0 deletions config/crd/bases/config.projectsveltos.io_clusterprofiles.yaml
@@ -246,6 +246,16 @@ spec:
- namespace
type: object
type: array
reloader:
default: false
description: Reloader indicates whether Deployment/StatefulSet/DaemonSet
instances deployed by Sveltos and part of this ClusterProfile need
to be restarted via rolling upgrade when a ConfigMap/Secret instance
mounted as a volume is modified. When set to true, if any mounted
ConfigMap/Secret is modified, Sveltos automatically starts a rolling
upgrade for the Deployment/StatefulSet/DaemonSet instances mounting
it.
type: boolean
stopMatchingBehavior:
default: WithdrawPolicies
description: StopMatchingBehavior indicates what behavior should be
10 changes: 10 additions & 0 deletions config/crd/bases/config.projectsveltos.io_clustersummaries.yaml
@@ -262,6 +262,16 @@ spec:
- namespace
type: object
type: array
reloader:
default: false
description: Reloader indicates whether Deployment/StatefulSet/DaemonSet
instances deployed by Sveltos and part of this ClusterProfile
need to be restarted via rolling upgrade when a ConfigMap/Secret
instance mounted as a volume is modified. When set to true, if
any mounted ConfigMap/Secret is modified, Sveltos automatically
starts a rolling upgrade for the Deployment/StatefulSet/DaemonSet
instances mounting it.
type: boolean
stopMatchingBehavior:
default: WithdrawPolicies
description: StopMatchingBehavior indicates what behavior should
2 changes: 1 addition & 1 deletion config/default/manager_image_patch.yaml
@@ -8,5 +8,5 @@ spec:
spec:
containers:
# Change the value of image field below to your controller image URL
- image: projectsveltos/addon-controller-amd64:main
- image: projectsveltos/addon-controller-amd64:dev
name: controller
6 changes: 6 additions & 0 deletions controllers/controllers_suite_test.go
@@ -114,6 +114,12 @@ var _ = BeforeSuite(func() {
Expect(testEnv.Create(context.TODO(), addonComplianceCRD)).To(Succeed())
Expect(waitForObject(context.TODO(), testEnv, addonComplianceCRD)).To(Succeed())

var reloaderCRD *unstructured.Unstructured
reloaderCRD, err = utils.GetUnstructured(libsveltoscrd.GetReloaderCRDYAML())
Expect(err).To(BeNil())
Expect(testEnv.Create(context.TODO(), reloaderCRD)).To(Succeed())
Expect(waitForObject(context.TODO(), testEnv, reloaderCRD)).To(Succeed())

// Wait for synchronization
// Sometimes we otherwise get "no matches for kind "AddonCompliance" in version "lib.projectsveltos.io/v1alpha1"
time.Sleep(2 * time.Second)
11 changes: 11 additions & 0 deletions controllers/export_test.go
@@ -133,3 +133,14 @@ var (
RunLuaValidations = runLuaValidations
LuaValidation = luaValidation
)

// reloader utils
var (
WatchForRollingUpgrade = watchForRollingUpgrade
CreateReloaderInstance = createReloaderInstance
DeployReloaderInstance = deployReloaderInstance
RemoveReloaderInstance = removeReloaderInstance
UpdateReloaderWithDeployedResources = updateReloaderWithDeployedResources
ConvertResourceReportsToObjectReference = convertResourceReportsToObjectReference
ConvertHelmResourcesToObjectReference = convertHelmResourcesToObjectReference
)
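
These aliases follow Go's export_test.go convention: the file belongs to package controllers but, being a _test.go file, is compiled only during tests, so the test suite can exercise otherwise-unexported helpers. A hypothetical use from a Ginkgo test (not part of this commit):

// hypothetical test snippet
refs := controllers.ConvertResourceReportsToObjectReference(reports)
Expect(refs).To(HaveLen(len(reports)))
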
62 changes: 50 additions & 12 deletions controllers/handlers_helm.go
@@ -129,15 +129,42 @@ func deployHelmCharts(ctx context.Context, c client.Client,
return err
}

var helmResources []libsveltosv1alpha1.HelmResources
if clusterSummary.Spec.ClusterProfileSpec.SyncMode == configv1alpha1.SyncModeContinuousWithDriftDetection ||
clusterSummary.Spec.ClusterProfileSpec.Reloader {

helmResources, err = collectResourcesFromManagedHelmCharts(ctx, c, clusterSummary, kubeconfig, logger)
if err != nil {
return err
}
}

if clusterSummary.Spec.ClusterProfileSpec.SyncMode == configv1alpha1.SyncModeContinuousWithDriftDetection {
// Deploy resourceSummary
err = deployResourceSummaryWithHelmResources(ctx, c, clusterNamespace, clusterName,
clusterType, clusterSummary, kubeconfig, logger)
err = deployResourceSummaryInCluster(ctx, c, clusterNamespace, clusterName, clusterSummary.Name,
clusterType, nil, nil, helmResources, logger)
if err != nil {
return err
return nil
}
}

clusterProfileOwnerRef, err := configv1alpha1.GetClusterProfileOwnerReference(clusterSummary)
if err != nil {
return err
}

// Update the Reloader instance. If the ClusterProfile Reloader knob is set to true, Sveltos
// starts a rolling upgrade for all Deployment/StatefulSet/DaemonSet instances deployed by Sveltos
// in the managed cluster whenever a mounted ConfigMap/Secret is updated. To do so, sveltos-agent
// needs to be told which Deployment/StatefulSet/DaemonSet instances require this behavior.
// Update the corresponding Reloader instance (the instance is deleted if Reloader is set to false).
resources := convertHelmResourcesToObjectReference(helmResources)
err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureHelm,
resources, clusterSummary, logger)
if err != nil {
return err
}

return nil
}
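
updateReloaderWithDeployedResources itself is added by this PR in a file not shown here. Judging from the call sites and the comment above, only workloads that can be rolling-upgraded are of interest to a Reloader; a self-contained, hypothetical sketch of that filtering step (corev1 is k8s.io/api/core/v1):

// Hypothetical sketch, not the PR's code: keep only the kinds a Reloader tracks.
func reloadableEntries(resources []corev1.ObjectReference) []corev1.ObjectReference {
    reloadable := map[string]bool{"Deployment": true, "StatefulSet": true, "DaemonSet": true}
    entries := make([]corev1.ObjectReference, 0, len(resources))
    for i := range resources {
        if reloadable[resources[i].Kind] {
            entries = append(entries, resources[i])
        }
    }
    return entries
}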

@@ -161,6 +188,8 @@ func undeployHelmCharts(ctx context.Context, c client.Client,
logger = logger.WithValues("clusterSummary", clusterSummary.Name)
logger = logger.WithValues("admin", fmt.Sprintf("%s/%s", adminNamespace, adminName))

logger.V(logs.LogDebug).Info("undeployHelmCharts")

kubeconfigContent, err := clusterproxy.GetSecretData(ctx, c, clusterNamespace, clusterName,
adminNamespace, adminName, clusterSummary.Spec.ClusterType, logger)
if err != nil {
@@ -194,6 +223,13 @@ func undeployHelmCharts(ctx context.Context, c client.Client,
if err != nil {
return err
}

err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureHelm,
nil, clusterSummary, logger)
if err != nil {
return err
}

err = updateClusterConfiguration(ctx, c, clusterSummary, clusterProfileOwnerRef,
configv1alpha1.FeatureHelm, nil, []configv1alpha1.Chart{})
if err != nil {
@@ -1393,13 +1429,16 @@ func getInstantiatedValues(ctx context.Context, clusterSummary *configv1alpha1.C
return chartutil.ReadValues([]byte(instantiatedValues))
}

func deployResourceSummaryWithHelmResources(ctx context.Context, c client.Client,
clusterNamespace, clusterName string, clusterType libsveltosv1alpha1.ClusterType,
clusterSummary *configv1alpha1.ClusterSummary, kubeconfig string, logger logr.Logger) error {
// collectResourcesFromManagedHelmCharts collects the resources deployed by all
// helm charts contained in a ClusterSummary and currently managed by the
// ClusterProfile instance
func collectResourcesFromManagedHelmCharts(ctx context.Context, c client.Client,
clusterSummary *configv1alpha1.ClusterSummary, kubeconfig string, logger logr.Logger,
) ([]libsveltosv1alpha1.HelmResources, error) {

chartManager, err := chartmanager.GetChartManagerInstance(ctx, c)
if err != nil {
return err
return nil, err
}

helmResources := make([]libsveltosv1alpha1.HelmResources, 0)
@@ -1412,18 +1451,18 @@ func deployResourceSummaryWithHelmResources(ctx context.Context, c client.Client
actionConfig, err := actionConfigInit(currentChart.ReleaseNamespace, kubeconfig, logger)

if err != nil {
return err
return nil, err
}

statusObject := action.NewStatus(actionConfig)
results, err := statusObject.Run(currentChart.ReleaseName)
if err != nil {
return err
return nil, err
}

resources, err := collectHelmContent(results.Manifest, logger)
if err != nil {
return err
return nil, err
}

l.V(logs.LogDebug).Info(fmt.Sprintf("found %d resources", len(resources)))
@@ -1439,8 +1478,7 @@ func deployResourceSummaryWithHelmResources(ctx context.Context, c client.Client
}
}

return deployResourceSummaryInCluster(ctx, c, clusterNamespace, clusterName, clusterSummary.Name,
clusterType, nil, nil, helmResources, logger)
return helmResources, nil
}
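
collectResourcesFromManagedHelmCharts leans on the helm v3 SDK's status action to obtain each release's rendered manifest. That pattern in isolation, as a sketch assuming an already-initialized *action.Configuration (which actionConfigInit above produces):

// sketch: fetch the rendered manifest of a deployed helm release
// import "helm.sh/helm/v3/pkg/action"
func releaseManifest(cfg *action.Configuration, releaseName string) (string, error) {
    statusObject := action.NewStatus(cfg)
    results, err := statusObject.Run(releaseName)
    if err != nil {
        return "", err
    }
    return results.Manifest, nil
}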

func collectHelmContent(manifest string, logger logr.Logger) ([]*unstructured.Unstructured, error) {
42 changes: 34 additions & 8 deletions controllers/handlers_kustomize.go
@@ -103,6 +103,12 @@ func deployKustomizeRefs(ctx context.Context, c client.Client,
if err != nil {
return err
}
remoteResources := convertResourceReportsToObjectReference(remoteResourceReports)
err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureKustomize,
remoteResources, clusterSummary, logger)
if err != nil {
return err
}

// If we are here there are no conflicts (and error would have been returned by deployKustomizeRef)
remoteDeployed := make([]configv1alpha1.Resource, 0)
@@ -116,15 +122,9 @@
return err
}

// Clean stale resources in the management cluster
_, err = cleanKustomizeResources(ctx, getManagementClusterConfig(), c, clusterSummary, localResourceReports, logger)
if err != nil {
return err
}

// Clean stale resources in the remote cluster
var undeployed []configv1alpha1.ResourceReport
undeployed, err = cleanKustomizeResources(ctx, remoteRestConfig, remoteClient, clusterSummary, remoteResourceReports, logger)
_, undeployed, err = cleanStaleKustomizeResources(ctx, remoteRestConfig, remoteClient, clusterSummary,
localResourceReports, remoteResourceReports, logger)
if err != nil {
return err
}
@@ -152,6 +152,26 @@
return nil
}

func cleanStaleKustomizeResources(ctx context.Context, remoteRestConfig *rest.Config, remoteClient client.Client,
clusterSummary *configv1alpha1.ClusterSummary, localResourceReports, remoteResourceReports []configv1alpha1.ResourceReport,
logger logr.Logger) (localUndeployed, remoteUndeployed []configv1alpha1.ResourceReport, err error) {
// Clean stale resources in the management cluster
localUndeployed, err = cleanKustomizeResources(ctx, getManagementClusterConfig(), getManagementClusterClient(),
clusterSummary, localResourceReports, logger)
if err != nil {
return
}

// Clean stale resources in the remote cluster
remoteUndeployed, err = cleanKustomizeResources(ctx, remoteRestConfig, remoteClient,
clusterSummary, remoteResourceReports, logger)
if err != nil {
return
}

return
}

func undeployKustomizeRefs(ctx context.Context, c client.Client,
clusterNamespace, clusterName, applicant, _ string,
clusterType libsveltosv1alpha1.ClusterType,
@@ -207,6 +227,12 @@ func undeployKustomizeRefs(ctx context.Context, c client.Client,
return err
}

err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureKustomize,
nil, clusterSummary, logger)
if err != nil {
return err
}

err = updateClusterConfiguration(ctx, c, clusterSummary, clusterProfileOwnerRef,
configv1alpha1.FeatureKustomize, []configv1alpha1.Resource{}, nil)
if err != nil {
46 changes: 38 additions & 8 deletions controllers/handlers_resources.go
@@ -88,6 +88,13 @@ func deployResources(ctx context.Context, c client.Client,
return err
}

remoteResources := convertResourceReportsToObjectReference(remoteResourceReports)
err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureResources,
remoteResources, clusterSummary, logger)
if err != nil {
return err
}

// If we are here there are no conflicts (and error would have been returned by deployPolicyRefs)
remoteDeployed := make([]configv1alpha1.Resource, 0)
for i := range remoteResourceReports {
@@ -100,15 +107,9 @@
return err
}

// Clean stale resources in the management cluster
_, err = cleanPolicyRefResources(ctx, getManagementClusterConfig(), c, clusterSummary, localResourceReports, logger)
if err != nil {
return err
}

// Clean stale resources in the remote cluster
var undeployed []configv1alpha1.ResourceReport
undeployed, err = cleanPolicyRefResources(ctx, remoteRestConfig, remoteClient, clusterSummary, remoteResourceReports, logger)
_, undeployed, err = cleanStaleResources(ctx, remoteRestConfig, remoteClient, clusterSummary,
localResourceReports, remoteResourceReports, logger)
if err != nil {
return err
}
@@ -136,6 +137,27 @@
return nil
}

func cleanStaleResources(ctx context.Context, remoteRestConfig *rest.Config, remoteClient client.Client,
clusterSummary *configv1alpha1.ClusterSummary, localResourceReports, remoteResourceReports []configv1alpha1.ResourceReport,
logger logr.Logger) (localUndeployed, remoteUndeployed []configv1alpha1.ResourceReport, err error) {

// Clean stale resources in the management cluster
localUndeployed, err = cleanPolicyRefResources(ctx, getManagementClusterConfig(), getManagementClusterClient(),
clusterSummary, localResourceReports, logger)
if err != nil {
return
}

// Clean stale resources in the remote cluster
remoteUndeployed, err = cleanPolicyRefResources(ctx, remoteRestConfig, remoteClient, clusterSummary,
remoteResourceReports, logger)
if err != nil {
return
}

return
}
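
convertResourceReportsToObjectReference is likewise introduced by this PR outside the files shown. From its call sites in deployResources and deployKustomizeRefs, a plausible, hypothetical shape — assuming configv1alpha1.ResourceReport carries a Resource with Kind/Namespace/Name:

// hypothetical sketch: flatten resource reports into plain object references
func toObjectReferences(reports []configv1alpha1.ResourceReport) []corev1.ObjectReference {
    refs := make([]corev1.ObjectReference, 0, len(reports))
    for i := range reports {
        r := &reports[i].Resource
        refs = append(refs, corev1.ObjectReference{Kind: r.Kind, Namespace: r.Namespace, Name: r.Name})
    }
    return refs
}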

// handleDriftDetectionManagerDeployment deploys, if sync mode is SyncModeContinuousWithDriftDetection,
// drift-detection-manager in the managed cluster
func handleDriftDetectionManagerDeployment(ctx context.Context, clusterSummary *configv1alpha1.ClusterSummary,
@@ -268,6 +290,12 @@ func undeployResources(ctx context.Context, c client.Client,
return err
}

err = updateReloaderWithDeployedResources(ctx, c, clusterProfileOwnerRef, configv1alpha1.FeatureResources,
nil, clusterSummary, logger)
if err != nil {
return err
}

err = updateClusterConfiguration(ctx, c, clusterSummary, clusterProfileOwnerRef,
configv1alpha1.FeatureResources, []configv1alpha1.Resource{}, nil)
if err != nil {
@@ -403,6 +431,8 @@ func deployPolicyRefs(ctx context.Context, c client.Client, remoteConfig *rest.C

var objectsToDeployLocally []client.Object
var objectsToDeployRemotely []client.Object
// collect all referenced ConfigMaps/Secrets whose content needs to be deployed
// in the management cluster (local) or the managed cluster (remote)
objectsToDeployLocally, objectsToDeployRemotely, err =
collectReferencedObjects(ctx, c, clusterSummary.Namespace, refs, logger)
if err != nil {