diff --git a/api/v1/resourcegroup_types.go b/api/v1/resourcegroup_types.go new file mode 100644 index 0000000..794968e --- /dev/null +++ b/api/v1/resourcegroup_types.go @@ -0,0 +1,194 @@ +// Copyright 2024 Stefan Prodan. +// SPDX-License-Identifier: AGPL-3.0 + +package v1 + +import ( + "strings" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + ResourceGroupKind = "ResourceGroup" +) + +// ResourceGroupSpec defines the desired state of ResourceGroup +type ResourceGroupSpec struct { + // CommonMetadata specifies the common labels and annotations that are + // applied to all resources. Any existing label or annotation will be + // overridden if its key matches a common one. + // +optional + CommonMetadata *CommonMetadata `json:"commonMetadata,omitempty"` + + // Inputs contains the list of resource group inputs. + // +optional + Inputs []ResourceGroupInput `json:"inputs,omitempty"` + + // Resources contains the list of Kubernetes resources to reconcile. + // +optional + Resources []*apiextensionsv1.JSON `json:"resources,omitempty"` + + // DependsOn specifies the list of Kubernetes resources that must + // exist on the cluster before the reconciliation process starts. + // +optional + DependsOn []Dependency `json:"dependsOn,omitempty"` + + // The name of the Kubernetes service account to impersonate + // when reconciling the generated resources. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // Wait instructs the controller to check the health of all the reconciled + // resources. Defaults to true. + // +kubebuilder:default:=true + // +optional + Wait bool `json:"wait,omitempty"` +} + +// CommonMetadata defines the common labels and annotations. +type CommonMetadata struct { + // Annotations to be added to the object's metadata. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Labels to be added to the object's metadata. + // +optional + Labels map[string]string `json:"labels,omitempty"` +} + +// Dependency defines a ResourceGroup dependency on a Kubernetes resource. +type Dependency struct { + // APIVersion of the resource to depend on. + // +required + APIVersion string `json:"apiVersion"` + + // Kind of the resource to depend on. + // +required + Kind string `json:"kind"` + + // Name of the resource to depend on. + // +required + Name string `json:"name"` + + // Namespace of the resource to depend on. + // +optional + Namespace string `json:"namespace,omitempty"` + + // Ready checks if the resource Ready status condition is true. + // +optional + Ready bool `json:"ready,omitempty"` +} + +// ResourceGroupInput defines the key-value pairs of the resource group input. +type ResourceGroupInput map[string]string + +// ResourceGroupStatus defines the observed state of ResourceGroup +type ResourceGroupStatus struct { + meta.ReconcileRequestStatus `json:",inline"` + + // Conditions contains the readiness conditions of the object. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Inventory contains a list of Kubernetes resource object references + // last applied on the cluster. + // +optional + Inventory *ResourceInventory `json:"inventory,omitempty"` +} + +// GetConditions returns the status conditions of the object. 
+func (in *ResourceGroup) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *ResourceGroup) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// IsDisabled returns true if the object has the reconcile annotation set to 'disabled'. +func (in *ResourceGroup) IsDisabled() bool { + val, ok := in.GetAnnotations()[ReconcileAnnotation] + return ok && strings.ToLower(val) == DisabledValue +} + +// GetInterval returns the interval at which the object should be reconciled. +// If no interval is set, the default is 60 minutes. +func (in *ResourceGroup) GetInterval() time.Duration { + val, ok := in.GetAnnotations()[ReconcileAnnotation] + if ok && strings.ToLower(val) == DisabledValue { + return 0 + } + defaultInterval := 60 * time.Minute + val, ok = in.GetAnnotations()[ReconcileEveryAnnotation] + if !ok { + return defaultInterval + } + interval, err := time.ParseDuration(val) + if err != nil { + return defaultInterval + } + return interval +} + +// GetTimeout returns the timeout for the reconciliation process. +// If no timeout is set, the default is 5 minutes. +func (in *ResourceGroup) GetTimeout() time.Duration { + defaultTimeout := 5 * time.Minute + val, ok := in.GetAnnotations()[ReconcileTimeoutAnnotation] + if !ok { + return defaultTimeout + } + timeout, err := time.ParseDuration(val) + if err != nil { + return defaultTimeout + } + return timeout +} + +// GetInputs returns the resource group inputs. +func (in *ResourceGroup) GetInputs() []map[string]string { + var inputs = make([]map[string]string, len(in.Spec.Inputs)) + for i, input := range in.Spec.Inputs { + inputs[i] = make(map[string]string) + for k, v := range input { + inputs[i][k] = v + } + } + return inputs +} + +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=rg +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// ResourceGroup is the Schema for the ResourceGroups API +type ResourceGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ResourceGroupSpec `json:"spec,omitempty"` + Status ResourceGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceGroupList contains a list of ResourceGroup +type ResourceGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceGroup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ResourceGroup{}, &ResourceGroupList{}) +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index ac36bae..4ea5192 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -9,6 +9,7 @@ package v1 import ( "github.com/fluxcd/pkg/apis/kustomize" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -28,6 +29,35 @@ func (in *Cluster) DeepCopy() *Cluster { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CommonMetadata) DeepCopyInto(out *CommonMetadata) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonMetadata. +func (in *CommonMetadata) DeepCopy() *CommonMetadata { + if in == nil { + return nil + } + out := new(CommonMetadata) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComponentImage) DeepCopyInto(out *ComponentImage) { *out = *in @@ -43,6 +73,21 @@ func (in *ComponentImage) DeepCopy() *ComponentImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Dependency) DeepCopyInto(out *Dependency) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependency. +func (in *Dependency) DeepCopy() *Dependency { + if in == nil { + return nil + } + out := new(Dependency) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Distribution) DeepCopyInto(out *Distribution) { *out = *in @@ -417,6 +462,163 @@ func (in *Kustomize) DeepCopy() *Kustomize { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroup) DeepCopyInto(out *ResourceGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroup. +func (in *ResourceGroup) DeepCopy() *ResourceGroup { + if in == nil { + return nil + } + out := new(ResourceGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceGroupInput) DeepCopyInto(out *ResourceGroupInput) { + { + in := &in + *out = make(ResourceGroupInput, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupInput. +func (in ResourceGroupInput) DeepCopy() ResourceGroupInput { + if in == nil { + return nil + } + out := new(ResourceGroupInput) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupList) DeepCopyInto(out *ResourceGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupList. +func (in *ResourceGroupList) DeepCopy() *ResourceGroupList { + if in == nil { + return nil + } + out := new(ResourceGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupSpec) DeepCopyInto(out *ResourceGroupSpec) { + *out = *in + if in.CommonMetadata != nil { + in, out := &in.CommonMetadata, &out.CommonMetadata + *out = new(CommonMetadata) + (*in).DeepCopyInto(*out) + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]ResourceGroupInput, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(ResourceGroupInput, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*apiextensionsv1.JSON, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + } + } + if in.DependsOn != nil { + in, out := &in.DependsOn, &out.DependsOn + *out = make([]Dependency, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupSpec. +func (in *ResourceGroupSpec) DeepCopy() *ResourceGroupSpec { + if in == nil { + return nil + } + out := new(ResourceGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupStatus) DeepCopyInto(out *ResourceGroupStatus) { + *out = *in + out.ReconcileRequestStatus = in.ReconcileRequestStatus + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = new(ResourceInventory) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupStatus. +func (in *ResourceGroupStatus) DeepCopy() *ResourceGroupStatus { + if in == nil { + return nil + } + out := new(ResourceGroupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceInventory) DeepCopyInto(out *ResourceInventory) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 08a0f1d..378c664 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -50,19 +50,22 @@ func init() { func main() { var ( - concurrent int - metricsAddr string - healthAddr string - enableLeaderElection bool - logOptions logger.Options - rateLimiterOptions runtimeCtrl.RateLimiterOptions - storagePath string + concurrent int + metricsAddr string + healthAddr string + enableLeaderElection bool + logOptions logger.Options + rateLimiterOptions runtimeCtrl.RateLimiterOptions + storagePath string + defaultServiceAccount string ) - flag.IntVar(&concurrent, "concurrent", 4, "The number of concurrent kustomize reconciles.") + flag.IntVar(&concurrent, "concurrent", 10, "The number of concurrent resource reconciles.") flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&healthAddr, "health-addr", ":8081", "The address the health endpoint binds to.") flag.StringVar(&storagePath, "storage-path", "/data", "The local storage path.") + flag.StringVar(&defaultServiceAccount, "default-service-account", "", + "Default service account used for impersonation.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") @@ -175,6 +178,22 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", fluxcdv1.FluxReportKind) os.Exit(1) } + + if err = (&controller.ResourceGroupReconciler{ + Client: mgr.GetClient(), + APIReader: mgr.GetAPIReader(), + Scheme: mgr.GetScheme(), + StatusPoller: polling.NewStatusPoller(mgr.GetClient(), mgr.GetRESTMapper(), polling.Options{}), + StatusManager: controllerName, + EventRecorder: mgr.GetEventRecorderFor(controllerName), + DefaultServiceAccount: defaultServiceAccount, + }).SetupWithManager(mgr, + controller.ResourceGroupReconcilerOptions{ + RateLimiter: runtimeCtrl.GetRateLimiter(rateLimiterOptions), + }); err != nil { + setupLog.Error(err, "unable to create controller", "controller", fluxcdv1.ResourceGroupKind) + os.Exit(1) + } // +kubebuilder:scaffold:builder probes.SetupChecks(mgr, setupLog) diff --git a/config/crd/bases/fluxcd.controlplane.io_resourcegroups.yaml b/config/crd/bases/fluxcd.controlplane.io_resourcegroups.yaml new file mode 100644 index 0000000..61eb39b --- /dev/null +++ b/config/crd/bases/fluxcd.controlplane.io_resourcegroups.yaml @@ -0,0 +1,227 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: resourcegroups.fluxcd.controlplane.io +spec: + group: fluxcd.controlplane.io + names: + kind: ResourceGroup + listKind: ResourceGroupList + plural: resourcegroups + shortNames: + - rg + singular: resourcegroup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: ResourceGroup is the Schema for the ResourceGroups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceGroupSpec defines the desired state of ResourceGroup + properties: + commonMetadata: + description: |- + CommonMetadata specifies the common labels and annotations that are + applied to all resources. Any existing label or annotation will be + overridden if its key matches a common one. + properties: + annotations: + additionalProperties: + type: string + description: Annotations to be added to the object's metadata. + type: object + labels: + additionalProperties: + type: string + description: Labels to be added to the object's metadata. + type: object + type: object + dependsOn: + description: |- + DependsOn specifies the list of Kubernetes resources that must + exist on the cluster before the reconciliation process starts. + items: + description: Dependency defines a ResourceGroup dependency on a + Kubernetes resource. + properties: + apiVersion: + description: APIVersion of the resource to depend on. + type: string + kind: + description: Kind of the resource to depend on. + type: string + name: + description: Name of the resource to depend on. + type: string + namespace: + description: Namespace of the resource to depend on. + type: string + ready: + description: Ready checks if the resource Ready status condition + is true. + type: boolean + required: + - apiVersion + - kind + - name + type: object + type: array + inputs: + description: Inputs contains the list of resource group inputs. + items: + additionalProperties: + type: string + description: ResourceGroupInput defines the key-value pairs of the + resource group input. + type: object + type: array + resources: + description: Resources contains the list of Kubernetes resources to + reconcile. + items: + x-kubernetes-preserve-unknown-fields: true + type: array + serviceAccountName: + description: |- + The name of the Kubernetes service account to impersonate + when reconciling the generated resources. + type: string + wait: + default: true + description: |- + Wait instructs the controller to check the health of all the reconciled + resources. Defaults to true. + type: boolean + type: object + status: + description: ResourceGroupStatus defines the observed state of ResourceGroup + properties: + conditions: + description: Conditions contains the readiness conditions of the object. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+              inventory:
+                description: |-
+                  Inventory contains a list of Kubernetes resource object references
+                  last applied on the cluster.
+                properties:
+                  entries:
+                    description: Entries of Kubernetes resource object references.
+                    items:
+                      description: ResourceRef contains the information necessary
+                        to locate a resource within a cluster.
+                      properties:
+                        id:
+                          description: |-
+                            ID is the string representation of the Kubernetes resource object's metadata,
+                            in the format '<namespace>_<name>_<group>_<kind>'.
+                          type: string
+                        v:
+                          description: Version is the API version of the Kubernetes
+                            resource object's kind.
+                          type: string
+                      required:
+                      - id
+                      - v
+                      type: object
+                    type: array
+                required:
+                - entries
+                type: object
+              lastHandledReconcileAt:
+                description: |-
+                  LastHandledReconcileAt holds the value of the most recent
+                  reconcile request value, so a change of the annotation value
+                  can be detected.
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 1631b82..00e8241 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,3 +3,4 @@ kind: Kustomization resources: - bases/fluxcd.controlplane.io_fluxinstances.yaml - bases/fluxcd.controlplane.io_fluxreports.yaml +- bases/fluxcd.controlplane.io_resourcegroups.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 8d8730d..b54020c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -9,6 +9,7 @@ rules: resources: - fluxinstances - fluxreports + - resourcegroups verbs: - create - delete @@ -22,6 +23,7 @@ rules: resources: - fluxinstances/finalizers - fluxreports/finalizers + - resourcegroups/finalizers verbs: - update - apiGroups: @@ -29,6 +31,7 @@ rules: resources: - fluxinstances/status - fluxreports/status + - resourcegroups/status verbs: - get - patch diff --git a/config/samples/fluxcd_v1_resourcegroup.yaml b/config/samples/fluxcd_v1_resourcegroup.yaml new file mode 100644 index 0000000..b0ebff7 --- /dev/null +++ b/config/samples/fluxcd_v1_resourcegroup.yaml @@ -0,0 +1,49 @@ +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: podinfo + namespace: default + annotations: + fluxcd.controlplane.io/reconcile: "enabled" + fluxcd.controlplane.io/reconcileEvery: "30m" + fluxcd.controlplane.io/reconcileTimeout: "5m" +spec: + dependsOn: + - apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + name: helmreleases.helm.toolkit.fluxcd.io + ready: true + commonMetadata: + labels: + app.kubernetes.io/name: podinfo + inputs: + - tenant: "team1" + version: "6.7.x" + replicas: "2" + - tenant: "team2" + version: "6.6.x" + replicas: "3" + resources: + - apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: OCIRepository + metadata: + name: podinfo-<< inputs.tenant >> + namespace: default + spec: + interval: 10m + url: oci://ghcr.io/stefanprodan/charts/podinfo + ref: + semver: << inputs.version | quote >> + - apiVersion: helm.toolkit.fluxcd.io/v2 + kind: HelmRelease + metadata: + name: podinfo-<< inputs.tenant >> + namespace: default + spec: + interval: 1h + releaseName: podinfo-<< inputs.tenant >> + chartRef: + kind: OCIRepository + name: podinfo-<< inputs.tenant >> + values: + replicaCount: << inputs.replicas | int >> diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index c94efe2..618386f 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -2,4 +2,5 @@ resources: - fluxcd_v1_fluxinstance.yaml - fluxcd_v1_fluxreport.yaml +- fluxcd_v1_resourcegroup.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/api/v1/resourcegroup.md b/docs/api/v1/resourcegroup.md new file mode 100644 index 0000000..e7ef2a3 --- /dev/null +++ b/docs/api/v1/resourcegroup.md @@ -0,0 +1,495 @@ +# Resource Group CRD + +**ResourceGroup** is a declarative API for generating a group of Kubernetes objects +based on a matrix of input values and a set of templated resources. + +The ResourceGroup API offers a high-level abstraction for defining and managing +Flux resources and related Kubernetes objects as a single unit. It is designed +to reduce the complexity of Kustomize overlays by providing a compact way +of defining different configurations for a set of workloads per tenant and/or environment. 
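+
+For a first orientation, here is a minimal, hypothetical sketch (the names are illustrative):
+two input entries and one templated resource yield two rendered objects, one per input.
+The Example section below shows a complete setup.
+
+```yaml
+apiVersion: fluxcd.controlplane.io/v1
+kind: ResourceGroup
+metadata:
+  name: tenants
+  namespace: default
+spec:
+  inputs:
+    - tenant: "team1"
+    - tenant: "team2"
+  resources:
+    - apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: << inputs.tenant >>
+```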
+
+Use cases:
+
+- Application definition: Bundle a set of Kubernetes resources (Flux HelmRelease, OCIRepository, Alert, Provider, Receiver, ImagePolicy) into a single deployable unit.
+- Dependency management: Define dependencies between apps to ensure that the resources are applied in the correct order. The dependencies are more flexible than in Flux: they can reference other ResourceGroups, CRDs, or any other Kubernetes objects.
+- Multi-instance provisioning: Generate multiple instances of the same application with different configurations.
+- Multi-cluster provisioning: Generate an instance of the same application for each target cluster, to be deployed by Flux from a management cluster.
+- Multi-tenancy provisioning: Generate a set of resources (Namespace, ServiceAccount, RoleBinding) for each tenant with specific roles and permissions.
+
+## Example
+
+The following example shows a ResourceGroup that generates an application instance consisting of a
+Flux HelmRelease and OCIRepository for each tenant, with a tenant-specific version and replica count.
+
+```yaml
+apiVersion: fluxcd.controlplane.io/v1
+kind: ResourceGroup
+metadata:
+  name: podinfo
+  namespace: default
+  annotations:
+    fluxcd.controlplane.io/reconcile: "enabled"
+    fluxcd.controlplane.io/reconcileEvery: "30m"
+    fluxcd.controlplane.io/reconcileTimeout: "5m"
+spec:
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: podinfo
+  inputs:
+    - tenant: "team1"
+      version: "6.7.x"
+      replicas: "2"
+    - tenant: "team2"
+      version: "6.6.x"
+      replicas: "3"
+  resources:
+    - apiVersion: source.toolkit.fluxcd.io/v1beta2
+      kind: OCIRepository
+      metadata:
+        name: podinfo-<< inputs.tenant >>
+        namespace: default
+      spec:
+        interval: 10m
+        url: oci://ghcr.io/stefanprodan/charts/podinfo
+        ref:
+          semver: << inputs.version | quote >>
+    - apiVersion: helm.toolkit.fluxcd.io/v2
+      kind: HelmRelease
+      metadata:
+        name: podinfo-<< inputs.tenant >>
+        namespace: default
+      spec:
+        interval: 1h
+        releaseName: podinfo-<< inputs.tenant >>
+        chartRef:
+          kind: OCIRepository
+          name: podinfo-<< inputs.tenant >>
+        values:
+          replicaCount: << inputs.replicas | int >>
+```
+
+You can run this example by saving the manifest into `podinfo.yaml`.
+
+1. Apply the ResourceGroup on the cluster:
+
+   ```shell
+   kubectl apply -f podinfo.yaml
+   ```
+
+2. Wait for the ResourceGroup to reconcile the generated resources:
+
+   ```shell
+   kubectl wait resourcegroup/podinfo --for=condition=ready --timeout=5m
+   ```
+
+3. Run `kubectl get resourcegroup` to see the status of the resource:
+
+   ```console
+   $ kubectl get resourcegroup
+   NAME      AGE   READY   STATUS
+   podinfo   59s   True    Reconciliation finished in 52s
+   ```
+
+4. Run `kubectl describe resourcegroup` to see the reconciliation status conditions and events:
+
+   ```console
+   $ kubectl describe resourcegroup podinfo
+   Status:
+     Conditions:
+       Last Transition Time:  2024-09-24T09:58:53Z
+       Message:               Reconciliation finished in 52s
+       Observed Generation:   1
+       Reason:                ReconciliationSucceeded
+       Status:                True
+       Type:                  Ready
+   Events:
+     Type    Reason                   Age   From           Message
+     ----    ------                   ----  ----           -------
+     Normal  ApplySucceeded           72s   flux-operator  HelmRelease/default/podinfo-team1 created
+                                            HelmRelease/default/podinfo-team2 created
+                                            OCIRepository/default/podinfo-team1 created
+                                            OCIRepository/default/podinfo-team2 created
+     Normal  ReconciliationSucceeded  72s   flux-operator  Reconciliation finished in 52s
+   ```
+
+5. Run `kubectl events` to see the events generated by the flux-operator:
+
+   ```shell
+   kubectl events --for resourcegroup/podinfo
+   ```
+
+6. Run `kubectl delete` to remove the ResourceGroup and its generated resources:
+
+   ```shell
+   kubectl delete resourcegroup podinfo
+   ```
+
+## Writing a ResourceGroup spec
+
+As with all other Kubernetes config, a ResourceGroup needs `apiVersion`,
+`kind`, `metadata.name` and `metadata.namespace` fields.
+The name of a ResourceGroup object must be a valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
+A ResourceGroup also needs a [`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
+
+### Inputs configuration
+
+The `.spec.inputs` field is optional and specifies a list of input values
+to be used in the resource templates.
+
+Example inputs:
+
+```yaml
+spec:
+  inputs:
+    - tenant: team1
+      version: "6.7.x"
+      replicas: "2"
+    - tenant: team2
+      version: "6.6.x"
+      replicas: "3"
+```
+
+An input value is a key-value pair of strings, where the key is the input name
+that can be referenced in the resource templates using the `<< inputs.name >>` syntax.
+
+### Resources configuration
+
+The `.spec.resources` field is optional and specifies the list of Kubernetes resources
+to be generated and reconciled on the cluster.
+
+Example of plain resources without any templating:
+
+```yaml
+spec:
+  resources:
+    - apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: apps
+    - apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: flux
+        namespace: apps
+```
+
+#### Templating resources
+
+The resources can be templated using the `<< inputs.name >>` syntax. The templating engine
+is based on Go text templates. The `<< >>` delimiters are used instead of `{{ }}` to avoid
+conflicts with Helm templating and to allow ResourceGroups to be included in Helm charts.
+
+Example of templated resources:
+
+```yaml
+spec:
+  inputs:
+    - tenant: team1
+      role: admin
+    - tenant: team2
+      role: cluster-admin
+  resources:
+    - apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: << inputs.tenant >>
+    - apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: flux
+        namespace: << inputs.tenant >>
+    - apiVersion: rbac.authorization.k8s.io/v1
+      kind: RoleBinding
+      metadata:
+        name: flux
+        namespace: << inputs.tenant >>
+      subjects:
+        - kind: ServiceAccount
+          name: flux
+          namespace: << inputs.tenant >>
+      roleRef:
+        kind: ClusterRole
+        name: << inputs.role >>
+        apiGroup: rbac.authorization.k8s.io
+```
+
+The above example generates a `Namespace`, a `ServiceAccount` and a `RoleBinding` for each tenant,
+binding the specified cluster role to the tenant's ServiceAccount.
+
+#### Templating functions
+
+The templating engine supports [slim-sprig](https://go-task.github.io/slim-sprig/) functions.
+
+It is recommended to use the `quote` function when templating strings to avoid issues with
+special characters e.g. `<< inputs.version | quote >>`.
+
+When templating integers, use the `int` function to convert the string to an integer
+e.g. `<< inputs.replicas | int >>`.
+
+When templating booleans, use the `bool` function to convert the string to a boolean
+e.g. `<< inputs.enabled | bool >>`.
+
+When using integer or boolean inputs as metadata label values, use the `quote` function to convert
+the value to a string e.g. `<< inputs.enabled | quote >>`.
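+
+For example, the three conversions can be combined in a single templated resource; the
+`HelmRelease` below is a hypothetical, abridged sketch and the input names are made up:
+
+```yaml
+spec:
+  inputs:
+    - tenant: "team1"
+      version: "6.7.x"
+      replicas: "2"
+      autoscaling: "true"
+  resources:
+    - apiVersion: helm.toolkit.fluxcd.io/v2
+      kind: HelmRelease
+      metadata:
+        name: app-<< inputs.tenant >>
+        labels:
+          autoscaling: << inputs.autoscaling | quote >>
+      spec:
+        values:
+          version: << inputs.version | quote >>
+          replicaCount: << inputs.replicas | int >>
+          autoscaling: << inputs.autoscaling | bool >>
+```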
+
+When using multi-line strings containing YAML, use the `nindent` function to properly format the string
+e.g.:
+
+```yaml
+spec:
+  inputs:
+    - tenant: team1
+      layerSelector: |
+        mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
+        operation: copy
+  resources:
+    - apiVersion: source.toolkit.fluxcd.io/v1beta2
+      kind: OCIRepository
+      metadata:
+        name: << inputs.tenant >>
+      spec:
+        layerSelector: << inputs.layerSelector | nindent 4 >>
+```
+
+#### Resource deduplication
+
+The flux-operator deduplicates resources based on the
+`apiVersion`, `kind`, `metadata.name` and `metadata.namespace` fields.
+
+This allows defining shared resources that are applied only once, regardless of the number of inputs.
+
+Example of a shared Flux source:
+
+```yaml
+spec:
+  inputs:
+    - tenant: "team1"
+      replicas: "2"
+    - tenant: "team2"
+      replicas: "3"
+  resources:
+    - apiVersion: source.toolkit.fluxcd.io/v1beta2
+      kind: OCIRepository
+      metadata:
+        name: podinfo
+        namespace: default
+      spec:
+        interval: 10m
+        url: oci://ghcr.io/stefanprodan/charts/podinfo
+        ref:
+          semver: '*'
+    - apiVersion: helm.toolkit.fluxcd.io/v2
+      kind: HelmRelease
+      metadata:
+        name: podinfo-<< inputs.tenant >>
+        namespace: default
+      spec:
+        interval: 1h
+        releaseName: podinfo-<< inputs.tenant >>
+        chartRef:
+          kind: OCIRepository
+          name: podinfo
+        values:
+          replicaCount: << inputs.replicas | int >>
+```
+
+In the above example, the `OCIRepository` resource is created only once
+and referenced by all the `HelmRelease` resources.
+
+### Common metadata
+
+The `.spec.commonMetadata` field is optional and specifies common metadata to be applied to all resources.
+
+It has two optional fields:
+
+- `labels`: A map used for setting [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+  on an object. Any existing label will be overridden if its key matches a key in this map.
+- `annotations`: A map used for setting [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+  on an object. Any existing annotation will be overridden if its key matches a key in this map.
+
+Example common metadata:
+
+```yaml
+spec:
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: podinfo
+    annotations:
+      fluxcd.controlplane.io/prune: disabled
+```
+
+In the above example, the resources generated by the ResourceGroup
+are not pruned by the garbage collection process, as the `fluxcd.controlplane.io/prune`
+annotation is set to `disabled`.
+
+### Dependency management
+
+`.spec.dependsOn` is an optional list used to refer to Kubernetes
+objects that the ResourceGroup depends on. If specified, the ResourceGroup
+is reconciled only after the referred objects exist in the cluster.
+
+A dependency is a reference to a Kubernetes object with the following fields:
+
+- `apiVersion`: The API version of the referred object (required).
+- `kind`: The kind of the referred object (required).
+- `name`: The name of the referred object (required).
+- `namespace`: The namespace of the referred object (optional).
+- `ready`: A boolean indicating whether the referred object must have the `Ready` status condition set to `True` (optional, default is `false`).
+
+Example of conditional reconciliation based on the existence of CustomResourceDefinitions
+and the readiness of a ResourceGroup:
+
+```yaml
+spec:
+  dependsOn:
+    - apiVersion: apiextensions.k8s.io/v1
+      kind: CustomResourceDefinition
+      name: helmreleases.helm.toolkit.fluxcd.io
+    - apiVersion: apiextensions.k8s.io/v1
+      kind: CustomResourceDefinition
+      name: servicemonitors.monitoring.coreos.com
+    - apiVersion: fluxcd.controlplane.io/v1
+      kind: ResourceGroup
+      name: cluster-addons
+      namespace: flux-system
+      ready: true
+```
+
+Note that it is recommended to define dependencies on CustomResourceDefinitions if the ResourceGroup
+deploys Flux HelmReleases that contain custom resources.
+
+When the dependencies are not met, the flux-operator reevaluates the requirements
+every five seconds and reconciles the ResourceGroup once the dependencies are satisfied.
+Failed dependencies are reported in the ResourceGroup `Ready` [status condition](#resourcegroup-status),
+in log messages and in Kubernetes events.
+
+### Reconciliation configuration
+
+The reconciliation behaviour of a ResourceGroup can be configured using the following annotations:
+
+- `fluxcd.controlplane.io/reconcile`: Enable or disable the reconciliation loop. Default is `enabled`; set to `disabled` to pause the reconciliation.
+- `fluxcd.controlplane.io/reconcileEvery`: Set the reconciliation interval used for drift detection and correction. Default is `1h`.
+- `fluxcd.controlplane.io/reconcileTimeout`: Set the reconciliation timeout, including health checks. Default is `5m`.
+
+### Health check configuration
+
+The `.spec.wait` field is optional and instructs the flux-operator to perform
+a health check on all applied resources and to wait for them to become ready. The health
+check is enabled by default and can be disabled by setting the `.spec.wait` field to `false`.
+
+The health check is performed for the following resource types:
+
+- Kubernetes built-in kinds: Deployment, DaemonSet, StatefulSet,
+  PersistentVolumeClaim, Service, Ingress, CustomResourceDefinition.
+- Flux kinds: HelmRelease, OCIRepository, Kustomization, GitRepository, etc.
+- Custom resources that are compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus).
+
+By default, the wait timeout is `5m` and can be changed with the
+`fluxcd.controlplane.io/reconcileTimeout` annotation set on the ResourceGroup object.
+
+### Role-based access control
+
+The `.spec.serviceAccountName` field is optional and specifies the name of the
+Kubernetes ServiceAccount used by the flux-operator to reconcile the ResourceGroup.
+The ServiceAccount must exist in the same namespace as the ResourceGroup
+and must have the necessary permissions to create, update and delete
+the resources defined in the ResourceGroup.
+
+On multi-tenant clusters, it is recommended to use a dedicated ServiceAccount per tenant namespace
+with the minimum required permissions. To enforce a ServiceAccount for all ResourceGroups,
+the `--default-service-account=flux-operator` flag can be set in the flux-operator container arguments.
+With this flag set, only the ResourceGroups created in the same namespace as the flux-operator
+will run with cluster-admin permissions.
+
+## ResourceGroup Status
+
+### Conditions
+
+A ResourceGroup enters various states during its lifecycle, reflected as Kubernetes Conditions.
+It can be [reconciling](#reconciling-resourcegroup) while applying the
+resources on the cluster, it can be [ready](#ready-resourcegroup), or it can [fail during
+reconciliation](#failed-resourcegroup).
+
+The ResourceGroup API is compatible with the **kstatus** specification,
+and reports `Reconciling` and `Stalled` conditions where applicable to
+provide better (timeout) support to solutions polling the ResourceGroup to
+become `Ready`.
+
+#### Reconciling ResourceGroup
+
+The flux-operator marks a ResourceGroup as _reconciling_ when it starts
+the reconciliation of the object. The Condition added to the ResourceGroup's
+`.status.conditions` has the following attributes:
+
+- `type: Reconciling`
+- `status: "True"`
+- `reason: Progressing` | `reason: ProgressingWithRetry`
+
+The Condition `message` is updated during the course of the reconciliation to
+report the action being performed at any particular moment, such as
+building manifests, detecting drift, etc.
+
+The `Ready` Condition's `status` is also marked as `Unknown`.
+
+#### Ready ResourceGroup
+
+The flux-operator marks a ResourceGroup as _ready_ when the resources have been
+built and applied on the cluster and all health checks are passing.
+
+When the ResourceGroup is "ready", the flux-operator sets a Condition with the
+following attributes in the ResourceGroup's `.status.conditions`:
+
+- `type: Ready`
+- `status: "True"`
+- `reason: ReconciliationSucceeded`
+
+#### Failed ResourceGroup
+
+The flux-operator may get stuck trying to reconcile and apply a
+ResourceGroup without completing. This can occur due to some of the following factors:
+
+- The dependencies are not ready.
+- The templating of the resources fails.
+- The resources are invalid and cannot be applied.
+- Garbage collection fails.
+- Running health checks fails.
+
+When this happens, the flux-operator sets the `Ready` Condition status to `False`
+and adds a Condition with the following attributes to the ResourceGroup's
+`.status.conditions`:
+
+- `type: Ready`
+- `status: "False"`
+- `reason: DependencyNotReady | BuildFailed | ReconciliationFailed | HealthCheckFailed`
+
+The `message` field of the Condition will contain more information about why
+the reconciliation failed.
+
+While the ResourceGroup has one or more of these Conditions, the flux-operator
+will continue to attempt a reconciliation with
+exponential backoff, until it succeeds and the ResourceGroup is marked as [ready](#ready-resourcegroup).
+
+### Inventory status
+
+In order to perform operations such as drift detection, garbage collection, upgrades, etc.,
+the flux-operator needs to keep track of all Kubernetes objects that are
+reconciled as part of a ResourceGroup. To do this, it maintains an inventory
+containing the list of Kubernetes resource object references that have been
+successfully applied and records it in `.status.inventory`. The inventory
+records are in the format `Id: <namespace>_<name>_<group>_<kind>, V: <apiVersion>`.
+ +Example: + +```text +Status: + Inventory: + Entries: + Id: default_podinfo__ServiceAccount + V: v1 + Id: default_podinfo__Service + V: v1 + Id: default_podinfo_apps_Deployment + V: v1 +``` diff --git a/go.mod b/go.mod index 8bbbc40..98875a6 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/fluxcd/pkg/runtime v0.49.1 github.com/fluxcd/pkg/ssa v0.41.1 github.com/fluxcd/pkg/tar v0.8.1 + github.com/go-task/slim-sprig/v3 v3.0.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/go-containerregistry v0.20.2 github.com/onsi/ginkgo/v2 v2.20.2 @@ -72,7 +73,6 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect diff --git a/internal/builder/resourcegroup.go b/internal/builder/resourcegroup.go new file mode 100644 index 0000000..550e703 --- /dev/null +++ b/internal/builder/resourcegroup.go @@ -0,0 +1,97 @@ +// Copyright 2024 Stefan Prodan. +// SPDX-License-Identifier: AGPL-3.0 + +package builder + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + ssautil "github.com/fluxcd/pkg/ssa/utils" + sprig "github.com/go-task/slim-sprig/v3" + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" +) + +// BuildResourceGroup builds a list of Kubernetes resources +// from a list of JSON templates using the provided inputs. +func BuildResourceGroup(templates []*apix.JSON, inputs []map[string]string) ([]*unstructured.Unstructured, error) { + var objects []*unstructured.Unstructured + for i, tmpl := range templates { + if len(inputs) == 0 { + object, err := BuildResource(tmpl, nil) + if err != nil { + return nil, fmt.Errorf("failed to build resource: %w", err) + } + + objects = append(objects, object) + continue + } + + for _, input := range inputs { + object, err := BuildResource(tmpl, input) + if err != nil { + return nil, fmt.Errorf("failed to build resources[%d]: %w", i, err) + } + + found := false + for _, obj := range objects { + if obj.GetAPIVersion() == object.GetAPIVersion() && + obj.GetKind() == object.GetKind() && + obj.GetNamespace() == object.GetNamespace() && + obj.GetName() == object.GetName() { + found = true + break + } + } + + if !found { + objects = append(objects, object) + } + } + } + + return objects, nil +} + +// BuildResource builds a Kubernetes resource from a JSON template using the provided inputs. +// Template functions are provided by the slim-sprig library https://go-task.github.io/slim-sprig/. +func BuildResource(tmpl *apix.JSON, inputs map[string]string) (*unstructured.Unstructured, error) { + ymlTemplate, err := yaml.JSONToYAML(tmpl.Raw) + if err != nil { + return nil, fmt.Errorf("failed to convert template to YAML: %w", err) + } + + var fnInputs = template.FuncMap{"inputs": func() map[string]string { + values := make(map[string]string) + for k, v := range inputs { + values[k] = v + } + return values + }} + + tp, err := template.New("res"). + Delims("<<", ">>"). + Funcs(sprig.HermeticTxtFuncMap()). + Funcs(fnInputs). 
+		Parse(string(ymlTemplate))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse template: %w", err)
+	}
+
+	b := &strings.Builder{}
+	err = tp.Execute(b, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to execute template: %w", err)
+	}
+
+	object, err := ssautil.ReadObject(bytes.NewReader([]byte(b.String())))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read object: %w", err)
+	}
+
+	return object, nil
+}
diff --git a/internal/builder/resourcegroup_test.go b/internal/builder/resourcegroup_test.go
new file mode 100644
index 0000000..4e255a1
--- /dev/null
+++ b/internal/builder/resourcegroup_test.go
@@ -0,0 +1,140 @@
+// Copyright 2024 Stefan Prodan.
+// SPDX-License-Identifier: AGPL-3.0
+
+package builder
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	ssautil "github.com/fluxcd/pkg/ssa/utils"
+	. "github.com/onsi/gomega"
+	"sigs.k8s.io/yaml"
+
+	v1 "github.com/controlplaneio-fluxcd/flux-operator/api/v1"
+)
+
+func TestBuildResourceGroup_Default(t *testing.T) {
+	g := NewWithT(t)
+
+	srcFile := filepath.Join("testdata", "resourcegroup", "default.yaml")
+	goldenFile := filepath.Join("testdata", "resourcegroup", "default.golden.yaml")
+
+	data, err := os.ReadFile(srcFile)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	var rg v1.ResourceGroup
+	err = yaml.Unmarshal(data, &rg)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	objects, err := BuildResourceGroup(rg.Spec.Resources, rg.GetInputs())
+	g.Expect(err).ToNot(HaveOccurred())
+
+	manifests, err := ssautil.ObjectsToYAML(objects)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	if shouldGenGolden() {
+		err = os.WriteFile(goldenFile, []byte(manifests), 0644)
+		g.Expect(err).NotTo(HaveOccurred())
+	}
+
+	goldenK, err := os.ReadFile(goldenFile)
+	g.Expect(err).NotTo(HaveOccurred())
+
+	g.Expect(manifests).To(Equal(string(goldenK)))
+}
+
+func TestBuildResourceGroup_Deduplication(t *testing.T) {
+	g := NewWithT(t)
+
+	srcFile := filepath.Join("testdata", "resourcegroup", "dedup.yaml")
+	goldenFile := filepath.Join("testdata", "resourcegroup", "dedup.golden.yaml")
+
+	data, err := os.ReadFile(srcFile)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	var rg v1.ResourceGroup
+	err = yaml.Unmarshal(data, &rg)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	objects, err := BuildResourceGroup(rg.Spec.Resources, rg.GetInputs())
+	g.Expect(err).ToNot(HaveOccurred())
+
+	manifests, err := ssautil.ObjectsToYAML(objects)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	if shouldGenGolden() {
+		err = os.WriteFile(goldenFile, []byte(manifests), 0644)
+		g.Expect(err).NotTo(HaveOccurred())
+	}
+
+	goldenK, err := os.ReadFile(goldenFile)
+	g.Expect(err).NotTo(HaveOccurred())
+
+	g.Expect(manifests).To(Equal(string(goldenK)))
+}
+
+func TestBuildResourceGroup_NoInputs(t *testing.T) {
+	g := NewWithT(t)
+
+	srcFile := filepath.Join("testdata", "resourcegroup", "noinputs.yaml")
+	goldenFile := filepath.Join("testdata", "resourcegroup", "noinputs.golden.yaml")
+
+	data, err := os.ReadFile(srcFile)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	var rg v1.ResourceGroup
+	err = yaml.Unmarshal(data, &rg)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	objects, err := BuildResourceGroup(rg.Spec.Resources, rg.GetInputs())
+	g.Expect(err).ToNot(HaveOccurred())
+
+	manifests, err := ssautil.ObjectsToYAML(objects)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	if shouldGenGolden() {
+		err = os.WriteFile(goldenFile, []byte(manifests), 0644)
+		g.Expect(err).NotTo(HaveOccurred())
+	}
+
+	goldenK, err := os.ReadFile(goldenFile)
+	g.Expect(err).NotTo(HaveOccurred())
+
+	
g.Expect(manifests).To(Equal(string(goldenK))) +} + +func TestBuildResourceGroup_Empty(t *testing.T) { + g := NewWithT(t) + + srcFile := filepath.Join("testdata", "resourcegroup", "empty.yaml") + + data, err := os.ReadFile(srcFile) + g.Expect(err).ToNot(HaveOccurred()) + + var rg v1.ResourceGroup + err = yaml.Unmarshal(data, &rg) + g.Expect(err).ToNot(HaveOccurred()) + + objects, err := BuildResourceGroup(rg.Spec.Resources, rg.GetInputs()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(objects).To(BeEmpty()) +} + +func TestBuildResourceGroup_Error(t *testing.T) { + g := NewWithT(t) + + srcFile := filepath.Join("testdata", "resourcegroup", "error.yaml") + + data, err := os.ReadFile(srcFile) + g.Expect(err).ToNot(HaveOccurred()) + + var rg v1.ResourceGroup + err = yaml.Unmarshal(data, &rg) + g.Expect(err).ToNot(HaveOccurred()) + + _, err = BuildResourceGroup(rg.Spec.Resources, rg.GetInputs()) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("failed to build resources[0]")) +} diff --git a/internal/builder/testdata/resourcegroup/dedup.golden.yaml b/internal/builder/testdata/resourcegroup/dedup.golden.yaml new file mode 100644 index 0000000..7971008 --- /dev/null +++ b/internal/builder/testdata/resourcegroup/dedup.golden.yaml @@ -0,0 +1,39 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: app1 + namespace: apps +spec: + interval: 10m + ref: + semver: '*' + url: oci://ghcr.io/org/charts/app1 +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: app1-team1 + namespace: apps +spec: + chartRef: + kind: OCIRepository + name: app1 + interval: 1h + releaseName: app1-team1 + values: + replicas: 2 +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: app1-team2 + namespace: apps +spec: + chartRef: + kind: OCIRepository + name: app1 + interval: 1h + releaseName: app1-team2 + values: + replicas: 3 +--- diff --git a/internal/builder/testdata/resourcegroup/dedup.yaml b/internal/builder/testdata/resourcegroup/dedup.yaml new file mode 100644 index 0000000..aca74e2 --- /dev/null +++ b/internal/builder/testdata/resourcegroup/dedup.yaml @@ -0,0 +1,35 @@ +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: app1 + namespace: apps +spec: + inputs: + - tenant: team1 + replicas: "2" + - tenant: team2 + replicas: "3" + resources: + - apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: OCIRepository + metadata: + name: app1 + namespace: apps + spec: + interval: 10m + url: oci://ghcr.io/org/charts/app1 + ref: + semver: '*' + - apiVersion: helm.toolkit.fluxcd.io/v2 + kind: HelmRelease + metadata: + name: app1-<< inputs.tenant >> + namespace: apps + spec: + interval: 1h + releaseName: app1-<< inputs.tenant >> + chartRef: + kind: OCIRepository + name: app1 + values: + replicas: << inputs.replicas | int >> diff --git a/internal/builder/testdata/resourcegroup/default.golden.yaml b/internal/builder/testdata/resourcegroup/default.golden.yaml new file mode 100644 index 0000000..9b940bb --- /dev/null +++ b/internal/builder/testdata/resourcegroup/default.golden.yaml @@ -0,0 +1,62 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: team1-app1 + namespace: apps +spec: + interval: 10m + layerSelector: + mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip + operation: copy + ref: + semver: '>=1.0.0-rc.0' + semverFilter: .*-rc.* + url: oci://ghcr.io/org/charts/app1 +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository 
+metadata: + name: team2-app1 + namespace: apps +spec: + interval: 10m + layerSelector: null + ref: + semver: '>=1.0.0' + semverFilter: '*' + url: oci://ghcr.io/org/charts/app1 +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + labels: + replicas: "2" + name: team1-app1 + namespace: apps +spec: + chartRef: + kind: OCIRepository + name: team1-app1 + interval: 1h + releaseName: team1-app1 + values: + domain: t1.example.com + replicas: 2 +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + labels: + replicas: "3" + name: team2-app1 + namespace: apps +spec: + chartRef: + kind: OCIRepository + name: team2-app1 + interval: 1h + releaseName: team2-app1 + values: + domain: t2.example.com + replicas: 3 +--- diff --git a/internal/builder/testdata/resourcegroup/default.yaml b/internal/builder/testdata/resourcegroup/default.yaml new file mode 100644 index 0000000..78700cd --- /dev/null +++ b/internal/builder/testdata/resourcegroup/default.yaml @@ -0,0 +1,48 @@ +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: app1 + namespace: apps +spec: + inputs: + - tenant: team1 + domain: t1.example.com + semver: ">=1.0.0-rc.0" + semverFilter: ".*-rc.*" + replicas: "2" + layerSelector: | + mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + operation: copy + - tenant: team2 + domain: t2.example.com + semver: ">=1.0.0" + replicas: "3" + resources: + - apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: OCIRepository + metadata: + name: << inputs.tenant >>-app1 + namespace: apps + spec: + interval: 10m + url: oci://ghcr.io/org/charts/app1 + layerSelector: << inputs.layerSelector | default "~" | nindent 4 >> + ref: + semver: << inputs.semver | quote >> + semverFilter: << inputs.semverFilter | default "*" | quote >> + - apiVersion: helm.toolkit.fluxcd.io/v2 + kind: HelmRelease + metadata: + name: << inputs.tenant >>-app1 + namespace: apps + labels: + replicas: << inputs.replicas | quote >> + spec: + interval: 1h + releaseName: << inputs.tenant >>-app1 + chartRef: + kind: OCIRepository + name: << inputs.tenant >>-app1 + values: + domain: << inputs.domain >> + replicas: << inputs.replicas | int >> diff --git a/internal/builder/testdata/resourcegroup/empty.yaml b/internal/builder/testdata/resourcegroup/empty.yaml new file mode 100644 index 0000000..79cc04e --- /dev/null +++ b/internal/builder/testdata/resourcegroup/empty.yaml @@ -0,0 +1,9 @@ +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: app1 + namespace: apps +spec: + commonMetadata: + labels: + app.kubernetes.io/name: podinfo diff --git a/internal/builder/testdata/resourcegroup/error.yaml b/internal/builder/testdata/resourcegroup/error.yaml new file mode 100644 index 0000000..b5653f0 --- /dev/null +++ b/internal/builder/testdata/resourcegroup/error.yaml @@ -0,0 +1,33 @@ +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: app1 + namespace: apps +spec: + inputs: + - tenant: team1 + semver: ">=1.0.0-rc.0" + - tenant: team2 + semver: ">=1.0.0-rc.0" + resources: + - apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: OCIRepository + metadata: + name: app1-<< inputs.tenant >> + namespace: apps + spec: + interval: 10m + url: oci://ghcr.io/org/charts/app1 + ref: + semver: << inputs.semver >> + - apiVersion: helm.toolkit.fluxcd.io/v2 + kind: HelmRelease + metadata: + name: app1-<< inputs.tenant >> + namespace: apps + spec: + interval: 1h + releaseName: app1-<< inputs.tenant >> + chartRef: + kind: OCIRepository + name: app1-<< 
inputs.tenant >> diff --git a/internal/builder/testdata/resourcegroup/noinputs.golden.yaml b/internal/builder/testdata/resourcegroup/noinputs.golden.yaml new file mode 100644 index 0000000..03cac4b --- /dev/null +++ b/internal/builder/testdata/resourcegroup/noinputs.golden.yaml @@ -0,0 +1,25 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: app1 + namespace: apps +spec: + interval: 10m + ref: + semver: '*' + url: oci://ghcr.io/org/charts/app1 +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: app1 + namespace: apps +spec: + chartRef: + kind: OCIRepository + name: app1 + interval: 1h + releaseName: app1 + values: + replicas: 2 +--- diff --git a/internal/builder/testdata/resourcegroup/noinputs.yaml b/internal/builder/testdata/resourcegroup/noinputs.yaml new file mode 100644 index 0000000..13b81a8 --- /dev/null +++ b/internal/builder/testdata/resourcegroup/noinputs.yaml @@ -0,0 +1,30 @@ +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: app1 + namespace: apps +spec: + resources: + - apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: OCIRepository + metadata: + name: app1 + namespace: apps + spec: + interval: 10m + url: oci://ghcr.io/org/charts/app1 + ref: + semver: '*' + - apiVersion: helm.toolkit.fluxcd.io/v2 + kind: HelmRelease + metadata: + name: app1 + namespace: apps + spec: + interval: 1h + releaseName: app1 + chartRef: + kind: OCIRepository + name: app1 + values: + replicas: << inputs.replicas | default 2 | int >> diff --git a/internal/controller/fluxinstance_controller.go b/internal/controller/fluxinstance_controller.go index 78d012d..5d6a8ba 100644 --- a/internal/controller/fluxinstance_controller.go +++ b/internal/controller/fluxinstance_controller.go @@ -91,14 +91,13 @@ func (r *FluxInstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request if !controllerutil.ContainsFinalizer(obj, fluxcdv1.Finalizer) { log.Info("Adding finalizer", "finalizer", fluxcdv1.Finalizer) controllerutil.AddFinalizer(obj, fluxcdv1.Finalizer) - msg := "Reconciliation in progress" conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, - "%s", msg) + "%s", msgInProgress) conditions.MarkReconciling(obj, meta.ProgressingReason, - "%s", msg) + "%s", msgInProgress) return ctrl.Result{Requeue: true}, nil } @@ -121,14 +120,13 @@ func (r *FluxInstanceReconciler) reconcile(ctx context.Context, reconcileStart := time.Now() // Mark the object as reconciling. - msg := "Reconciliation in progress" conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, - "%s", msg) + "%s", msgInProgress) conditions.MarkReconciling(obj, meta.ProgressingReason, - "%s", msg) + "%s", msgInProgress) if err := r.patch(ctx, obj, patcher); err != nil { return ctrl.Result{}, fmt.Errorf("failed to update status: %w", err) } @@ -201,7 +199,7 @@ func (r *FluxInstanceReconciler) reconcile(ctx context.Context, // Mark the object as ready. 
obj.Status.LastAppliedRevision = obj.Status.LastAttemptedRevision - msg = fmt.Sprintf("Reconciliation finished in %s", fmtDuration(reconcileStart)) + msg := fmt.Sprintf("Reconciliation finished in %s", fmtDuration(reconcileStart)) conditions.MarkTrue(obj, meta.ReadyCondition, meta.ReconciliationSucceededReason, diff --git a/internal/controller/fluxinstance_manager.go b/internal/controller/fluxinstance_manager.go index 3308164..69c626d 100644 --- a/internal/controller/fluxinstance_manager.go +++ b/internal/controller/fluxinstance_manager.go @@ -14,6 +14,8 @@ import ( fluxcdv1 "github.com/controlplaneio-fluxcd/flux-operator/api/v1" ) +const msgInProgress = "Reconciliation in progress" + // FluxInstanceReconcilerOptions contains options for the reconciler. type FluxInstanceReconcilerOptions struct { RateLimiter workqueue.TypedRateLimiter[reconcile.Request] diff --git a/internal/controller/resourcegroup_controller.go b/internal/controller/resourcegroup_controller.go new file mode 100644 index 0000000..b916c40 --- /dev/null +++ b/internal/controller/resourcegroup_controller.go @@ -0,0 +1,517 @@ +// Copyright 2024 Stefan Prodan. +// SPDX-License-Identifier: AGPL-3.0 + +package controller + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/fluxcd/cli-utils/pkg/kstatus/polling" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/pkg/apis/meta" + runtimeClient "github.com/fluxcd/pkg/runtime/client" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/ssa" + "github.com/fluxcd/pkg/ssa/normalize" + ssautil "github.com/fluxcd/pkg/ssa/utils" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + fluxcdv1 "github.com/controlplaneio-fluxcd/flux-operator/api/v1" + "github.com/controlplaneio-fluxcd/flux-operator/internal/builder" + "github.com/controlplaneio-fluxcd/flux-operator/internal/inventory" +) + +// ResourceGroupReconciler reconciles a ResourceGroup object +type ResourceGroupReconciler struct { + client.Client + kuberecorder.EventRecorder + + APIReader client.Reader + Scheme *runtime.Scheme + StatusPoller *polling.StatusPoller + StatusManager string + DefaultServiceAccount string +} + +// +kubebuilder:rbac:groups=fluxcd.controlplane.io,resources=resourcegroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=fluxcd.controlplane.io,resources=resourcegroups/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=fluxcd.controlplane.io,resources=resourcegroups/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *ResourceGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { + log := ctrl.LoggerFrom(ctx) + + obj := &fluxcdv1.ResourceGroup{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Initialize the runtime patcher with the current version of the object. 
+	patcher := patch.NewSerialPatcher(obj, r.Client)
+
+	// Finalize the reconciliation and report the results.
+	defer func() {
+		if err := r.finalizeStatus(ctx, obj, patcher); err != nil {
+			log.Error(err, "failed to update status")
+			retErr = kerrors.NewAggregate([]error{retErr, err})
+		}
+	}()
+
+	// Uninstall if the object is under deletion.
+	if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.uninstall(ctx, obj)
+	}
+
+	// Add the finalizer if it does not exist.
+	if !controllerutil.ContainsFinalizer(obj, fluxcdv1.Finalizer) {
+		log.Info("Adding finalizer", "finalizer", fluxcdv1.Finalizer)
+		controllerutil.AddFinalizer(obj, fluxcdv1.Finalizer)
+		conditions.MarkUnknown(obj,
+			meta.ReadyCondition,
+			meta.ProgressingReason,
+			"%s", msgInProgress)
+		conditions.MarkReconciling(obj,
+			meta.ProgressingReason,
+			"%s", msgInProgress)
+		return ctrl.Result{Requeue: true}, nil
+	}
+
+	// Pause reconciliation if the object has the reconcile annotation set to 'disabled'.
+	if obj.IsDisabled() {
+		msg := "Reconciliation is disabled"
+		log.Error(errors.New("can't reconcile resource group"), msg)
+		r.Event(obj, corev1.EventTypeWarning, "ReconciliationDisabled", msg)
+		return ctrl.Result{}, nil
+	}
+
+	// Check dependencies and requeue the reconciliation if the check fails.
+	if err := r.checkDependencies(ctx, obj); err != nil {
+		msg := fmt.Sprintf("Retrying dependency check: %s", err.Error())
+		if conditions.GetReason(obj, meta.ReadyCondition) != meta.DependencyNotReadyReason {
+			log.Error(err, "dependency check failed")
+			r.Event(obj, corev1.EventTypeNormal, meta.DependencyNotReadyReason, msg)
+		}
+		conditions.MarkFalse(obj,
+			meta.ReadyCondition,
+			meta.DependencyNotReadyReason,
+			"%s", msg)
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
+
+	// Reconcile the object.
+	return r.reconcile(ctx, obj, patcher)
+}
+
+func (r *ResourceGroupReconciler) reconcile(ctx context.Context,
+	obj *fluxcdv1.ResourceGroup,
+	patcher *patch.SerialPatcher) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+	reconcileStart := time.Now()
+
+	// Mark the object as reconciling.
+	msg := msgInProgress
+	conditions.MarkUnknown(obj,
+		meta.ReadyCondition,
+		meta.ProgressingReason,
+		"%s", msg)
+	conditions.MarkReconciling(obj,
+		meta.ProgressingReason,
+		"%s", msg)
+	if err := r.patch(ctx, obj, patcher); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to update status: %w", err)
+	}
+
+	// Build the resources.
+	buildResult, err := builder.BuildResourceGroup(obj.Spec.Resources, obj.GetInputs())
+	if err != nil {
+		msg := fmt.Sprintf("build failed: %s", err.Error())
+		conditions.MarkFalse(obj,
+			meta.ReadyCondition,
+			meta.BuildFailedReason,
+			"%s", msg)
+		conditions.MarkTrue(obj,
+			meta.StalledCondition,
+			meta.BuildFailedReason,
+			"%s", msg)
+		log.Error(err, msg)
+		r.EventRecorder.Event(obj, corev1.EventTypeWarning, meta.BuildFailedReason, msg)
+		return ctrl.Result{}, nil
+	}
+
+	// Apply the resources to the cluster.
+	if err := r.apply(ctx, obj, buildResult); err != nil {
+		msg := fmt.Sprintf("reconciliation failed: %s", err.Error())
+		conditions.MarkFalse(obj,
+			meta.ReadyCondition,
+			meta.ReconciliationFailedReason,
+			"%s", msg)
+		r.EventRecorder.Event(obj, corev1.EventTypeWarning, meta.ReconciliationFailedReason, msg)
+
+		return ctrl.Result{}, err
+	}
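An aside on the dependency gate above: when a `dependsOn` entry sets `ready: true`, the controller accepts the dependency only once kstatus reports it as `Current` (see `checkDependencies` later in this file). A minimal standalone sketch of that readiness test, using the same `kstatus` package this controller imports; the sample object below is a hypothetical stand-in:

```go
package main

import (
	"fmt"

	"github.com/fluxcd/cli-utils/pkg/kstatus/status"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// isReady mirrors the dependency check: an object counts as ready
// only when kstatus computes the CurrentStatus for it.
func isReady(obj *unstructured.Unstructured) (bool, error) {
	res, err := status.Compute(obj)
	if err != nil {
		return false, fmt.Errorf("computing status: %w", err)
	}
	return res.Status == status.CurrentStatus, nil
}

func main() {
	// Hypothetical object whose observedGeneration matches its generation,
	// which the generic kstatus rules resolve to Current.
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "fluxcd.controlplane.io/v1",
		"kind":       "ResourceGroup",
		"metadata": map[string]interface{}{
			"name": "app1", "namespace": "apps", "generation": int64(1),
		},
		"status": map[string]interface{}{
			"observedGeneration": int64(1),
		},
	}}
	ready, err := isReady(obj)
	fmt.Println(ready, err) // true <nil>
}
```

+
+	// Mark the object as ready.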
+ msg = fmt.Sprintf("Reconciliation finished in %s", fmtDuration(reconcileStart)) + conditions.MarkTrue(obj, + meta.ReadyCondition, + meta.ReconciliationSucceededReason, + "%s", msg) + log.Info(msg) + r.EventRecorder.Event(obj, + corev1.EventTypeNormal, + meta.ReconciliationSucceededReason, + msg) + + return requeueAfterResourceGroup(obj), nil +} + +func (r *ResourceGroupReconciler) checkDependencies(ctx context.Context, + obj *fluxcdv1.ResourceGroup) error { + + for _, dep := range obj.Spec.DependsOn { + depObj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": dep.APIVersion, + "kind": dep.Kind, + "metadata": map[string]interface{}{ + "name": dep.Name, + "namespace": dep.Namespace, + }, + }, + } + + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(depObj), depObj); err != nil { + return fmt.Errorf("dependency %s/%s/%s not found: %w", dep.APIVersion, dep.Kind, dep.Name, err) + } + + if dep.Ready { + stat, err := status.Compute(depObj) + if err != nil { + return fmt.Errorf("dependency %s/%s/%s not ready: %w", dep.APIVersion, dep.Kind, dep.Name, err) + } + + if stat.Status != status.CurrentStatus { + return fmt.Errorf("dependency %s/%s/%s not ready: status %s", dep.APIVersion, dep.Kind, dep.Name, stat.Status) + } + } + } + + return nil +} + +// apply reconciles the resources in the cluster by performing +// a server-side apply, pruning of stale resources and waiting +// for the resources to become ready. +func (r *ResourceGroupReconciler) apply(ctx context.Context, + obj *fluxcdv1.ResourceGroup, + objects []*unstructured.Unstructured) error { + log := ctrl.LoggerFrom(ctx) + var changeSetLog strings.Builder + + // Create a snapshot of the current inventory. + oldInventory := inventory.New() + if obj.Status.Inventory != nil { + obj.Status.Inventory.DeepCopyInto(oldInventory) + } + + // Configure the Kubernetes client for impersonation. + impersonation := runtimeClient.NewImpersonator( + r.Client, + r.StatusPoller, + polling.Options{}, + nil, + runtimeClient.KubeConfigOptions{}, + r.DefaultServiceAccount, + obj.Spec.ServiceAccountName, + obj.GetNamespace(), + ) + + // Create the Kubernetes client that runs under impersonation. + kubeClient, statusPoller, err := impersonation.GetClient(ctx) + if err != nil { + return fmt.Errorf("failed to build kube client: %w", err) + } + + // Create a resource manager to reconcile the resources. + resourceManager := ssa.NewResourceManager(kubeClient, statusPoller, ssa.Owner{ + Field: r.StatusManager, + Group: fmt.Sprintf("resourcegroup.%s", fluxcdv1.GroupVersion.Group), + }) + resourceManager.SetOwnerLabels(objects, obj.GetName(), obj.GetNamespace()) + + if err := normalize.UnstructuredList(objects); err != nil { + return err + } + + if cm := obj.Spec.CommonMetadata; cm != nil { + ssautil.SetCommonMetadata(objects, cm.Labels, cm.Annotations) + } + + applyOpts := ssa.DefaultApplyOptions() + applyOpts.Cleanup = ssa.ApplyCleanupOptions{ + // Remove the kubectl and helm annotations. + Annotations: []string{ + corev1.LastAppliedConfigAnnotation, + "meta.helm.sh/release-name", + "meta.helm.sh/release-namespace", + }, + // Remove the flux labels set at bootstrap. + Labels: []string{ + "kustomize.toolkit.fluxcd.io/name", + "kustomize.toolkit.fluxcd.io/namespace", + }, + // Take ownership of the Flux resources if they + // were previously managed by other tools. 
+		FieldManagers: []ssa.FieldManager{
+			{
+				Name:          "flux",
+				OperationType: metav1.ManagedFieldsOperationApply,
+			},
+			{
+				Name:          "kustomize-controller",
+				OperationType: metav1.ManagedFieldsOperationApply,
+			},
+			{
+				Name:          "helm",
+				OperationType: metav1.ManagedFieldsOperationUpdate,
+			},
+			{
+				Name:          "kubectl",
+				OperationType: metav1.ManagedFieldsOperationUpdate,
+			},
+		},
+	}
+
+	resultSet := ssa.NewChangeSet()
+
+	// Apply the resources to the cluster.
+	changeSet, err := resourceManager.ApplyAllStaged(ctx, objects, applyOpts)
+	if err != nil {
+		return err
+	}
+
+	// Keep only the resources that have changed.
+	for _, change := range changeSet.Entries {
+		if hasChanged(change.Action) {
+			resultSet.Add(change)
+			changeSetLog.WriteString(change.String() + "\n")
+		}
+	}
+
+	// Log the changeset.
+	if len(resultSet.Entries) > 0 {
+		log.Info("Server-side apply completed",
+			"output", resultSet.ToMap())
+	}
+
+	// Create an inventory from the reconciled resources.
+	newInventory := inventory.New()
+	err = inventory.AddChangeSet(newInventory, changeSet)
+	if err != nil {
+		return err
+	}
+
+	// Set last applied inventory in status.
+	obj.Status.Inventory = newInventory
+
+	// Detect stale resources which are subject to garbage collection.
+	staleObjects, err := inventory.Diff(oldInventory, newInventory)
+	if err != nil {
+		return err
+	}
+
+	// Garbage collect stale resources.
+	if len(staleObjects) > 0 {
+		deleteOpts := ssa.DeleteOptions{
+			PropagationPolicy: metav1.DeletePropagationBackground,
+			Inclusions:        resourceManager.GetOwnerLabels(obj.Name, obj.Namespace),
+			Exclusions: map[string]string{
+				fluxcdv1.PruneAnnotation: fluxcdv1.DisabledValue,
+			},
+		}
+
+		deleteSet, err := resourceManager.DeleteAll(ctx, staleObjects, deleteOpts)
+		if err != nil {
+			return err
+		}
+
+		if len(deleteSet.Entries) > 0 {
+			for _, change := range deleteSet.Entries {
+				changeSetLog.WriteString(change.String() + "\n")
+			}
+			log.Info("Garbage collection completed",
+				"output", deleteSet.ToMap())
+		}
+	}
+
+	// Emit event only if the server-side apply resulted in changes.
+	applyLog := strings.TrimSuffix(changeSetLog.String(), "\n")
+	if applyLog != "" {
+		r.EventRecorder.Event(obj,
+			corev1.EventTypeNormal,
+			"ApplySucceeded",
+			applyLog)
+	}
+
+	// Wait for the resources to become ready.
+	if obj.Spec.Wait && len(resultSet.Entries) > 0 {
+		if err := resourceManager.WaitForSet(resultSet.ToObjMetadataSet(), ssa.WaitOptions{
+			Interval: 5 * time.Second,
+			Timeout:  obj.GetTimeout(),
+			FailFast: true,
+		}); err != nil {
+			return err
+		}
+		log.Info("Health check completed")
+	}
+
+	return nil
+}
+
+// finalizeStatus updates the object status and conditions.
+func (r *ResourceGroupReconciler) finalizeStatus(ctx context.Context,
+	obj *fluxcdv1.ResourceGroup,
+	patcher *patch.SerialPatcher) error {
+	// Set the value of the reconciliation request in status.
+	if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
+		obj.Status.LastHandledReconcileAt = v
+	}
+
+	// Set the Reconciling reason to ProgressingWithRetry if the
+	// reconciliation has failed.
+	if conditions.IsFalse(obj, meta.ReadyCondition) &&
+		conditions.Has(obj, meta.ReconcilingCondition) {
+		rc := conditions.Get(obj, meta.ReconcilingCondition)
+		rc.Reason = meta.ProgressingWithRetryReason
+		conditions.Set(obj, rc)
+	}
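The pruning above hinges on `inventory.Diff`, which compares the inventory snapshot taken before the apply with the one built from the new changeset. The internal `inventory` package is not part of this diff, so the following is only a sketch of the set difference it is assumed to compute, using the `ResourceRef` shape that appears in the tests below:

```go
package main

import "fmt"

// ResourceRef mirrors the inventory entry shape used in the tests:
// an object ID plus the last applied API version.
type ResourceRef struct {
	ID      string // namespace_name_group_kind
	Version string
}

// diff returns the refs present in old but missing from new, i.e. the
// objects that became stale and are subject to garbage collection.
func diff(oldRefs, newRefs []ResourceRef) []ResourceRef {
	current := make(map[string]bool, len(newRefs))
	for _, r := range newRefs {
		current[r.ID] = true
	}
	var stale []ResourceRef
	for _, r := range oldRefs {
		if !current[r.ID] {
			stale = append(stale, r)
		}
	}
	return stale
}

func main() {
	oldRefs := []ResourceRef{
		{ID: "apps_team1-readonly__ServiceAccount", Version: "v1"},
		{ID: "apps_team2-readwrite__ServiceAccount", Version: "v1"},
	}
	newRefs := oldRefs[:1] // the readwrite account was removed from spec
	fmt.Println(diff(oldRefs, newRefs))
	// [{apps_team2-readwrite__ServiceAccount v1}]
}
```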
+
+	// Remove the Reconciling condition.
+	if conditions.IsTrue(obj, meta.ReadyCondition) || conditions.IsTrue(obj, meta.StalledCondition) {
+		conditions.Delete(obj, meta.ReconcilingCondition)
+	}
+
+	// Patch finalizers, status and conditions.
+	return r.patch(ctx, obj, patcher)
+}
+
+// uninstall deletes all the resources managed by the ResourceGroup.
+//
+//nolint:unparam
+func (r *ResourceGroupReconciler) uninstall(ctx context.Context,
+	obj *fluxcdv1.ResourceGroup) (ctrl.Result, error) {
+	reconcileStart := time.Now()
+	log := ctrl.LoggerFrom(ctx)
+
+	if obj.IsDisabled() || obj.Status.Inventory == nil || len(obj.Status.Inventory.Entries) == 0 {
+		controllerutil.RemoveFinalizer(obj, fluxcdv1.Finalizer)
+		return ctrl.Result{}, nil
+	}
+
+	// Configure the Kubernetes client for impersonation.
+	impersonation := runtimeClient.NewImpersonator(
+		r.Client,
+		r.StatusPoller,
+		polling.Options{},
+		nil,
+		runtimeClient.KubeConfigOptions{},
+		r.DefaultServiceAccount,
+		obj.Spec.ServiceAccountName,
+		obj.GetNamespace(),
+	)
+
+	// Prune the managed resources if the service account is found.
+	if impersonation.CanImpersonate(ctx) {
+		kubeClient, _, err := impersonation.GetClient(ctx)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+
+		// Use the same owner group as the apply path so that the
+		// inclusion labels match the labels set on the resources.
+		resourceManager := ssa.NewResourceManager(kubeClient, nil, ssa.Owner{
+			Field: r.StatusManager,
+			Group: fmt.Sprintf("resourcegroup.%s", fluxcdv1.GroupVersion.Group),
+		})
+
+		opts := ssa.DeleteOptions{
+			PropagationPolicy: metav1.DeletePropagationBackground,
+			Inclusions:        resourceManager.GetOwnerLabels(obj.Name, obj.Namespace),
+			Exclusions: map[string]string{
+				fluxcdv1.PruneAnnotation: fluxcdv1.DisabledValue,
+			},
+		}
+
+		objects, _ := inventory.List(obj.Status.Inventory)
+
+		changeSet, err := resourceManager.DeleteAll(ctx, objects, opts)
+		if err != nil {
+			log.Error(err, "pruning for deleted resource failed")
+		} else {
+			msg := fmt.Sprintf("Uninstallation completed in %v", fmtDuration(reconcileStart))
+			log.Info(msg, "output", changeSet.ToMap())
+		}
+	} else {
+		log.Error(errors.New("service account not found"), "skipping pruning for deleted resource")
+	}
+
+	// Release the object to be garbage collected.
+	controllerutil.RemoveFinalizer(obj, fluxcdv1.Finalizer)
+
+	// Stop reconciliation as the object is being deleted.
+	return ctrl.Result{}, nil
+}
+
+// patch updates the object status, conditions and finalizers.
+func (r *ResourceGroupReconciler) patch(ctx context.Context,
+	obj *fluxcdv1.ResourceGroup,
+	patcher *patch.SerialPatcher) (retErr error) {
+	// Configure the runtime patcher.
+	ownedConditions := []string{
+		meta.ReadyCondition,
+		meta.ReconcilingCondition,
+		meta.StalledCondition,
+	}
+	patchOpts := []patch.Option{
+		patch.WithOwnedConditions{Conditions: ownedConditions},
+		patch.WithForceOverwriteConditions{},
+		patch.WithFieldOwner(r.StatusManager),
+	}
+
+	// Patch the object status, conditions and finalizers.
+	if err := patcher.Patch(ctx, obj, patchOpts...); err != nil {
+		if !obj.GetDeletionTimestamp().IsZero() {
+			err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) })
+		}
+		retErr = kerrors.NewAggregate([]error{retErr, err})
+		if retErr != nil {
+			return retErr
+		}
+	}
+
+	return nil
+}
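Both garbage-collection paths scope deletion with `Inclusions: resourceManager.GetOwnerLabels(...)`, so only objects carrying this ResourceGroup's owner labels are ever deleted. A sketch of the label convention assumed here (fluxcd/pkg/ssa derives the label keys from `ssa.Owner.Group`, which is why apply and uninstall must use the same group; the helper below is illustrative, not the library's API):

```go
package main

import "fmt"

// ownerLabels reproduces the assumed <group>/name and <group>/namespace
// key scheme that SetOwnerLabels stamps on applied objects and that
// DeleteAll later matches via Inclusions.
func ownerLabels(group, name, namespace string) map[string]string {
	return map[string]string{
		fmt.Sprintf("%s/name", group):      name,
		fmt.Sprintf("%s/namespace", group): namespace,
	}
}

func main() {
	// Matches the expectedLabel asserted in the lifecycle test.
	fmt.Println(ownerLabels("resourcegroup.fluxcd.controlplane.io", "tenants", "apps"))
	// map[resourcegroup.fluxcd.controlplane.io/name:tenants
	//     resourcegroup.fluxcd.controlplane.io/namespace:apps]
}
```

+
+// requeueAfterResourceGroup returns a ctrl.Result with the requeue time set to the
+// interval specified in the object's annotations.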
+func requeueAfterResourceGroup(obj *fluxcdv1.ResourceGroup) ctrl.Result { + result := ctrl.Result{} + if obj.GetInterval() > 0 { + result.RequeueAfter = obj.GetInterval() + } + + return result +} diff --git a/internal/controller/resourcegroup_controller_test.go b/internal/controller/resourcegroup_controller_test.go new file mode 100644 index 0000000..45333ab --- /dev/null +++ b/internal/controller/resourcegroup_controller_test.go @@ -0,0 +1,440 @@ +// Copyright 2024 Stefan Prodan. +// SPDX-License-Identifier: AGPL-3.0 + +package controller + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/fluxcd/cli-utils/pkg/kstatus/polling" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" + + fluxcdv1 "github.com/controlplaneio-fluxcd/flux-operator/api/v1" +) + +func TestResourceGroupReconciler_LifeCycle(t *testing.T) { + g := NewWithT(t) + reconciler := getResourceGroupReconciler() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + ns, err := testEnv.CreateNamespace(ctx, "test") + g.Expect(err).ToNot(HaveOccurred()) + + objDef := fmt.Sprintf(` +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: tenants + namespace: "%[1]s" +spec: + commonMetadata: + annotations: + owner: "%[1]s" + inputs: + - tenant: team1 + - tenant: team2 + resources: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: << inputs.tenant >>-readonly + namespace: "%[1]s" + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: << inputs.tenant >>-readwrite + namespace: "%[1]s" +`, ns.Name) + + obj := &fluxcdv1.ResourceGroup{} + err = yaml.Unmarshal([]byte(objDef), obj) + g.Expect(err).ToNot(HaveOccurred()) + + // Initialize the instance. + err = testEnv.Create(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + + r, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Requeue).To(BeTrue()) + + // Check if the finalizer was added. + resultInit := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), resultInit) + g.Expect(err).ToNot(HaveOccurred()) + + logObjectStatus(t, resultInit) + g.Expect(resultInit.Finalizers).To(ContainElement(fluxcdv1.Finalizer)) + + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Requeue).To(BeFalse()) + + // Check if the instance was installed. + result := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), result) + g.Expect(err).ToNot(HaveOccurred()) + + logObjectStatus(t, result) + g.Expect(conditions.GetReason(result, meta.ReadyCondition)).To(BeIdenticalTo(meta.ReconciliationSucceededReason)) + + // Check if the inventory was updated. 
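The inventory assertions that follow rely on the cli-utils object ID encoding, where each entry ID reads `namespace_name_group_kind` and core API objects carry an empty group, hence the double underscore. The real type is `object.ObjMetadata` from fluxcd/cli-utils; the helper below is only an illustrative reconstruction of the format:

```go
package main

import "fmt"

// objectID sketches the assumed ObjMetadata string form:
// namespace_name_group_kind, joined with underscores.
func objectID(namespace, name, group, kind string) string {
	return fmt.Sprintf("%s_%s_%s_%s", namespace, name, group, kind)
}

func main() {
	fmt.Println(objectID("apps", "team2-readonly", "", "ServiceAccount"))
	// apps_team2-readonly__ServiceAccount
}
```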
+ g.Expect(result.Status.Inventory.Entries).To(HaveLen(4)) + g.Expect(result.Status.Inventory.Entries).To(ContainElements( + fluxcdv1.ResourceRef{ + ID: fmt.Sprintf("%s_team2-readonly__ServiceAccount", ns.Name), + Version: "v1", + }, + fluxcdv1.ResourceRef{ + ID: fmt.Sprintf("%s_team2-readwrite__ServiceAccount", ns.Name), + Version: "v1", + }, + )) + + // Check if the resources were created and labeled. + resultSA := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "team2-readwrite", + Namespace: ns.Name, + }, + } + err = testClient.Get(ctx, client.ObjectKeyFromObject(resultSA), resultSA) + g.Expect(err).ToNot(HaveOccurred()) + + expectedLabel := fmt.Sprintf("resourcegroup.%s", fluxcdv1.GroupVersion.Group) + g.Expect(resultSA.Labels).To(HaveKeyWithValue(expectedLabel+"/name", "tenants")) + g.Expect(resultSA.Labels).To(HaveKeyWithValue(expectedLabel+"/namespace", ns.Name)) + g.Expect(resultSA.Annotations).To(HaveKeyWithValue("owner", ns.Name)) + + // Check if events were recorded for each step. + events := getEvents(result.Name) + g.Expect(events).To(HaveLen(2)) + g.Expect(events[0].Reason).To(Equal("ApplySucceeded")) + g.Expect(events[0].Message).To(ContainSubstring("team1-readonly created")) + g.Expect(events[1].Reason).To(Equal(meta.ReconciliationSucceededReason)) + g.Expect(events[1].Message).To(HavePrefix("Reconciliation finished")) + + // Update the resource group. + resultP := result.DeepCopy() + resultP.SetAnnotations(map[string]string{ + fluxcdv1.ReconcileAnnotation: fluxcdv1.EnabledValue, + fluxcdv1.ReconcileEveryAnnotation: "1m", + }) + resultP.Spec.Resources = resultP.Spec.Resources[:len(resultP.Spec.Resources)-1] + + err = testClient.Patch(ctx, resultP, client.MergeFrom(result)) + g.Expect(err).ToNot(HaveOccurred()) + + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + + // Check if the instance was scheduled for reconciliation. + g.Expect(r.RequeueAfter).To(Equal(time.Minute)) + + // Check the final status. + resultFinal := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), resultFinal) + g.Expect(err).ToNot(HaveOccurred()) + + // Check if the inventory was updated. + logObject(t, resultFinal) + g.Expect(resultFinal.Status.Inventory.Entries).To(HaveLen(2)) + g.Expect(resultFinal.Status.Inventory.Entries).ToNot(ContainElements( + fluxcdv1.ResourceRef{ + ID: fmt.Sprintf("%s_team2-readwrite__ServiceAccount", ns.Name), + Version: "v1", + }, + )) + g.Expect(resultFinal.Status.Inventory.Entries).To(ContainElements( + fluxcdv1.ResourceRef{ + ID: fmt.Sprintf("%s_team1-readonly__ServiceAccount", ns.Name), + Version: "v1", + }, + fluxcdv1.ResourceRef{ + ID: fmt.Sprintf("%s_team2-readonly__ServiceAccount", ns.Name), + Version: "v1", + }, + )) + + // Check if the resources were deleted. + err = testClient.Get(ctx, client.ObjectKeyFromObject(resultSA), resultSA) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + + // Delete the resource group. + err = testClient.Delete(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.IsZero()).To(BeTrue()) + + // Check if the resource group was finalized. 
+ result = &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), result) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) +} + +func TestResourceGroupReconciler_DependsOn(t *testing.T) { + g := NewWithT(t) + reconciler := getResourceGroupReconciler() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + ns, err := testEnv.CreateNamespace(ctx, "test") + g.Expect(err).ToNot(HaveOccurred()) + + objDef := fmt.Sprintf(` +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: tenants + namespace: "%[1]s" +spec: + dependsOn: + - apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + name: fluxinstances.fluxcd.controlplane.io + ready: true + - apiVersion: v1 + kind: ServiceAccount + name: test + namespace: "%[1]s" + resources: + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: readonly + namespace: "%[1]s" +`, ns.Name) + + obj := &fluxcdv1.ResourceGroup{} + err = yaml.Unmarshal([]byte(objDef), obj) + g.Expect(err).ToNot(HaveOccurred()) + + // Initialize the instance. + err = testEnv.Create(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + + r, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Requeue).To(BeTrue()) + + // Reconcile with not found dependency. + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.RequeueAfter).To(Equal(5 * time.Second)) + + // Check if the instance was installed. + result := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), result) + g.Expect(err).ToNot(HaveOccurred()) + + logObjectStatus(t, result) + g.Expect(conditions.GetReason(result, meta.ReadyCondition)).To(BeIdenticalTo(meta.DependencyNotReadyReason)) + g.Expect(conditions.GetMessage(result, meta.ReadyCondition)).To(ContainSubstring("test not found")) + + // Create the dependency. + dep := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: ns.Name, + }, + } + + err = testClient.Create(ctx, dep) + g.Expect(err).ToNot(HaveOccurred()) + + // Reconcile with ready dependencies. + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + + // Check if the instance was installed. + resultFinal := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), resultFinal) + g.Expect(err).ToNot(HaveOccurred()) + + logObjectStatus(t, resultFinal) + g.Expect(conditions.GetReason(resultFinal, meta.ReadyCondition)).To(BeIdenticalTo(meta.ReconciliationSucceededReason)) + + // Delete the resource group. + err = testClient.Delete(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.IsZero()).To(BeTrue()) +} + +func TestResourceGroupReconciler_Impersonation(t *testing.T) { + g := NewWithT(t) + reconciler := getResourceGroupReconciler() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Generate a kubeconfig for the testenv-admin user. 
+ user, err := testEnv.AddUser(envtest.User{ + Name: "testenv-admin", + Groups: []string{"system:masters"}, + }, nil) + if err != nil { + panic(fmt.Sprintf("failed to create testenv-admin user: %v", err)) + } + + kubeConfig, err := user.KubeConfig() + if err != nil { + panic(fmt.Sprintf("failed to create the testenv-admin user kubeconfig: %v", err)) + } + + tmpDir := t.TempDir() + err = os.WriteFile(fmt.Sprintf("%s/kubeconfig", tmpDir), kubeConfig, 0644) + g.Expect(err).ToNot(HaveOccurred()) + + // Set the kubeconfig environment variable for the impersonator. + t.Setenv("KUBECONFIG", fmt.Sprintf("%s/kubeconfig", tmpDir)) + + ns, err := testEnv.CreateNamespace(ctx, "test") + g.Expect(err).ToNot(HaveOccurred()) + + objDef := fmt.Sprintf(` +apiVersion: fluxcd.controlplane.io/v1 +kind: ResourceGroup +metadata: + name: test + namespace: "%[1]s" +spec: + serviceAccountName: flux-operator + resources: + - apiVersion: v1 + kind: ConfigMap + metadata: + name: test + namespace: "%[1]s" +`, ns.Name) + + obj := &fluxcdv1.ResourceGroup{} + err = yaml.Unmarshal([]byte(objDef), obj) + g.Expect(err).ToNot(HaveOccurred()) + + // Initialize the instance. + err = testEnv.Create(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + + r, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.Requeue).To(BeTrue()) + + // Reconcile with missing service account. + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).To(HaveOccurred()) + + // Check if the instance was installed. + result := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), result) + g.Expect(err).ToNot(HaveOccurred()) + + logObjectStatus(t, result) + g.Expect(conditions.GetReason(result, meta.ReadyCondition)).To(BeIdenticalTo(meta.ReconciliationFailedReason)) + + // Create the service account and role binding. + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "flux-operator", + Namespace: ns.Name, + }, + } + + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "flux-operator", + Namespace: ns.Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "flux-operator", + Namespace: ns.Name, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "cluster-admin", + }, + } + + err = testClient.Create(ctx, sa) + g.Expect(err).ToNot(HaveOccurred()) + err = testClient.Create(ctx, rb) + g.Expect(err).ToNot(HaveOccurred()) + + // Reconcile with existing service account. + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + + // Check if the instance was installed. + resultFinal := &fluxcdv1.ResourceGroup{} + err = testClient.Get(ctx, client.ObjectKeyFromObject(obj), resultFinal) + g.Expect(err).ToNot(HaveOccurred()) + + logObjectStatus(t, resultFinal) + g.Expect(conditions.GetReason(resultFinal, meta.ReadyCondition)).To(BeIdenticalTo(meta.ReconciliationSucceededReason)) + + // Delete the resource group. 
+ err = testClient.Delete(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + + r, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(obj), + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.IsZero()).To(BeTrue()) +} + +func getResourceGroupReconciler() *ResourceGroupReconciler { + return &ResourceGroupReconciler{ + Client: testClient, + APIReader: testClient, + Scheme: NewTestScheme(), + StatusPoller: polling.NewStatusPoller(testClient, testEnv.GetRESTMapper(), polling.Options{}), + StatusManager: controllerName, + EventRecorder: testEnv.GetEventRecorderFor(controllerName), + } +} diff --git a/internal/controller/resourcegroup_manager.go b/internal/controller/resourcegroup_manager.go new file mode 100644 index 0000000..267b5f8 --- /dev/null +++ b/internal/controller/resourcegroup_manager.go @@ -0,0 +1,35 @@ +// Copyright 2024 Stefan Prodan. +// SPDX-License-Identifier: AGPL-3.0 + +package controller + +import ( + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + fluxcdv1 "github.com/controlplaneio-fluxcd/flux-operator/api/v1" +) + +// ResourceGroupReconcilerOptions contains options for the reconciler. +type ResourceGroupReconcilerOptions struct { + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ResourceGroupReconciler) SetupWithManager(mgr ctrl.Manager, opts ResourceGroupReconcilerOptions) error { + return ctrl.NewControllerManagedBy(mgr). + For(&fluxcdv1.ResourceGroup{}, + builder.WithPredicates( + predicate.Or( + predicate.GenerationChangedPredicate{}, + predicate.AnnotationChangedPredicate{}, + ), + )). + WithOptions(controller.Options{ + RateLimiter: opts.RateLimiter, + }).Complete(r) +} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 831938a..413ba76 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -20,6 +20,7 @@ import ( . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -45,6 +46,7 @@ var ( func NewTestScheme() *runtime.Scheme { s := runtime.NewScheme() utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(rbacv1.AddToScheme(s)) utilruntime.Must(appsv1.AddToScheme(s)) utilruntime.Must(apiextensionsv1.AddToScheme(s)) utilruntime.Must(fluxcdv1.AddToScheme(s)) diff --git a/test/e2e/instance_test.go b/test/e2e/instance_test.go index 9a64668..cb21435 100644 --- a/test/e2e/instance_test.go +++ b/test/e2e/instance_test.go @@ -17,7 +17,7 @@ var _ = Describe("FluxInstance", Ordered, func() { By("reconcile FluxInstance") verifyFluxInstanceReconcile := func() error { cmd := exec.Command("kubectl", "apply", - "-k", "config/samples", "-n", namespace, + "-f", "config/samples/fluxcd_v1_fluxinstance.yaml", "-n", namespace, ) _, err := Run(cmd, "/test/e2e") ExpectWithOffset(2, err).NotTo(HaveOccurred()) @@ -33,6 +33,31 @@ var _ = Describe("FluxInstance", Ordered, func() { }) }) + Context("resource group lifecycle", func() { + It("should run successfully", func() { + By("reconcile ResourceGroup") + reconcile := func() error { + cmd := exec.Command("kubectl", "apply", + "-f", "config/samples/fluxcd_v1_resourcegroup.yaml", + ) + _, err := Run(cmd, "/test/e2e") + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + + cmd = exec.Command("kubectl", "wait", "ResourceGroup/podinfo", + "--for=condition=Ready", "--timeout=5m", + ) + _, err = Run(cmd, "/test/e2e") + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + + cmd = exec.Command("kubectl", "delete", "ResourceGroup/podinfo") + _, err = Run(cmd, "/test/e2e") + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + return nil + } + EventuallyWithOffset(1, reconcile, 5*time.Minute, 10*time.Second).Should(Succeed()) + }) + }) + Context("upgrade", func() { It("should run successfully", func() { By("reconcile FluxInstance") @@ -65,7 +90,7 @@ var _ = Describe("FluxInstance", Ordered, func() { Context("uninstallation", func() { It("should run successfully", func() { By("delete FluxInstance") - cmd := exec.Command("kubectl", "delete", "-k", "config/samples", + cmd := exec.Command("kubectl", "delete", "FluxInstance/flux", "--timeout=30s", "-n", namespace) _, err := Run(cmd, "/test/e2e") Expect(err).NotTo(HaveOccurred()) diff --git a/test/olm/instance_test.go b/test/olm/instance_test.go index ac2b70d..6f3c1e7 100644 --- a/test/olm/instance_test.go +++ b/test/olm/instance_test.go @@ -38,7 +38,7 @@ var _ = Describe("FluxInstance", Ordered, func() { Context("uninstallation", func() { It("should run successfully", func() { By("delete FluxInstance") - cmd := exec.Command("kubectl", "delete", "-k", "config/samples", + cmd := exec.Command("kubectl", "delete", "FluxInstance/flux", "--timeout=30s", "-n", namespace) _, err := utils.Run(cmd, "/test/olm") Expect(err).NotTo(HaveOccurred())