Commit

Deprecate the ExtraKafkaConfigs field and move cleanup of configmaps to cleanup function
SaaldjorMike committed Jan 29, 2025
1 parent f166ad8 commit f9cba55
Showing 6 changed files with 90 additions and 70 deletions.
5 changes: 4 additions & 1 deletion api/v1alpha1/humiocluster_types.go
@@ -203,7 +203,10 @@ type HumioNodeSpec struct {
// Deprecated: LogScale 1.70.0 deprecated this option, and it was later removed in LogScale 1.80.0
NodeUUIDPrefix string `json:"nodeUUIDPrefix,omitempty"`

- // ExtraKafkaConfigs is a multi-line string containing kafka properties
+ // ExtraKafkaConfigs is a multi-line string containing kafka properties.
+ // Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of
+ // LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection
+ // of new environment variables. For more details, see the LogScale release notes.
ExtraKafkaConfigs string `json:"extraKafkaConfigs,omitempty"`

// ExtraHumioVolumeMounts is the list of additional volume mounts that will be added to the Humio container
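For users moving off the deprecated field, the replacement is to pass Kafka properties as individual environment variables on the node spec instead of one multi-line string. A minimal sketch in Go; the KAFKA_COMMON_SECURITY_PROTOCOL variable name is an assumption for illustration, since the actual per-property naming scheme lives in the LogScale 1.173.0 release notes, not in this commit:

package example

import (
	humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// buildSpec sketches the migration: drop ExtraKafkaConfigs and pass each Kafka
// property as its own environment variable on the node spec.
func buildSpec() humiov1alpha1.HumioClusterSpec {
	return humiov1alpha1.HumioClusterSpec{
		HumioNodeSpec: humiov1alpha1.HumioNodeSpec{
			// Before (deprecated): ExtraKafkaConfigs: "security.protocol=SSL",
			EnvironmentVariables: []corev1.EnvVar{
				// Assumed variable name; consult the LogScale release notes
				// for the real per-property scheme.
				{Name: "KAFKA_COMMON_SECURITY_PROTOCOL", Value: "SSL"},
			},
		},
	}
}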
14 changes: 10 additions & 4 deletions charts/humio-operator/crds/core.humio.com_humioclusters.yaml
@@ -3815,8 +3815,11 @@ spec:
type: object
type: array
extraKafkaConfigs:
- description: ExtraKafkaConfigs is a multi-line string containing kafka
-   properties
+ description: |-
+   ExtraKafkaConfigs is a multi-line string containing kafka properties.
+   Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of
+   LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection
+   of new environment variables. For more details, see the LogScale release notes.
type: string
extraVolumes:
description: ExtraVolumes is the list of additional volumes that will
@@ -9393,8 +9396,11 @@ spec:
type: object
type: array
extraKafkaConfigs:
- description: ExtraKafkaConfigs is a multi-line string containing
-   kafka properties
+ description: |-
+   ExtraKafkaConfigs is a multi-line string containing kafka properties.
+   Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of
+   LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection
+   of new environment variables. For more details, see the LogScale release notes.
type: string
extraVolumes:
description: ExtraVolumes is the list of additional volumes
14 changes: 10 additions & 4 deletions config/crd/bases/core.humio.com_humioclusters.yaml
@@ -3815,8 +3815,11 @@ spec:
type: object
type: array
extraKafkaConfigs:
- description: ExtraKafkaConfigs is a multi-line string containing kafka
-   properties
+ description: |-
+   ExtraKafkaConfigs is a multi-line string containing kafka properties.
+   Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of
+   LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection
+   of new environment variables. For more details, see the LogScale release notes.
type: string
extraVolumes:
description: ExtraVolumes is the list of additional volumes that will
@@ -9393,8 +9396,11 @@ spec:
type: object
type: array
extraKafkaConfigs:
- description: ExtraKafkaConfigs is a multi-line string containing
-   kafka properties
+ description: |-
+   ExtraKafkaConfigs is a multi-line string containing kafka properties.
+   Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of
+   LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection
+   of new environment variables. For more details, see the LogScale release notes.
type: string
extraVolumes:
description: ExtraVolumes is the list of additional volumes
81 changes: 48 additions & 33 deletions controllers/humiocluster_controller.go
@@ -188,8 +188,6 @@ func (r *HumioClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
r.ensureValidCAIssuer,
r.ensureHumioClusterCACertBundle,
r.ensureHumioClusterKeystoreSecret,
- r.ensureViewGroupPermissionsConfigMap,
- r.ensureRolePermissionsConfigMap,
r.ensureNoIngressesIfIngressNotEnabled, // TODO: cleanupUnusedResources seems like a better place for this
r.ensureIngress,
} {
@@ -205,6 +203,8 @@
r.ensureInitContainerPermissions,
r.ensureHumioNodeCertificates,
r.ensureExtraKafkaConfigsConfigMap,
+ r.ensureViewGroupPermissionsConfigMap,
+ r.ensureRolePermissionsConfigMap,
} {
if err := fun(ctx, hc, pool); err != nil {
return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
@@ -448,13 +448,6 @@ func (r *HumioClusterReconciler) validateNodeCount(hc *humiov1alpha1.HumioCluste
func (r *HumioClusterReconciler) ensureExtraKafkaConfigsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error {
extraKafkaConfigsConfigMapData := hnp.GetExtraKafkaConfigs()
if extraKafkaConfigsConfigMapData == "" {
- extraKafkaConfigsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetExtraKafkaConfigsConfigMapName(), hc.Namespace)
- if err == nil {
- // TODO: refactor and move deletion to cleanupUnusedResources
- if err = r.Delete(ctx, &extraKafkaConfigsConfigMap); err != nil {
- r.Log.Error(err, "unable to delete extra kafka configs configmap")
- }
- }
return nil
}

@@ -550,21 +543,14 @@ func (r *HumioClusterReconciler) setImageFromSource(ctx context.Context, hnp *Hu

// ensureViewGroupPermissionsConfigMap creates a configmap containing configs specified in viewGroupPermissions which will be mounted
// into the Humio container and used by Humio's configuration option READ_GROUP_PERMISSIONS_FROM_FILE
- func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster) error {
- viewGroupPermissionsConfigMapData := viewGroupPermissionsOrDefault(hc)
+ func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error {
+ viewGroupPermissionsConfigMapData := hnp.GetViewGroupPermissions()
if viewGroupPermissionsConfigMapData == "" {
- viewGroupPermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, ViewGroupPermissionsConfigMapName(hc), hc.Namespace)
- if err == nil {
- // TODO: refactor and move deletion to cleanupUnusedResources
- if err = r.Delete(ctx, &viewGroupPermissionsConfigMap); err != nil {
- r.Log.Error(err, "unable to delete view group permissions configmap")
- }
- }
return nil
}

desiredConfigMap := kubernetes.ConstructViewGroupPermissionsConfigMap(
- ViewGroupPermissionsConfigMapName(hc),
+ hnp.GetViewGroupPermissionsConfigMapName(),
ViewGroupPermissionsFilename,
viewGroupPermissionsConfigMapData,
hc.Name,
@@ -574,7 +560,7 @@ func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context
return r.logErrorAndReturn(err, "could not set controller reference")
}

- existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, ViewGroupPermissionsConfigMapName(hc), hc.Namespace)
+ existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetViewGroupPermissionsConfigMapName(), hc.Namespace)
if err != nil {
if k8serrors.IsNotFound(err) {
r.Log.Info(fmt.Sprintf("creating configMap: %s", desiredConfigMap.Name))
@@ -600,21 +586,14 @@ func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context

// ensureRolePermissionsConfigMap creates a configmap containing configs specified in rolePermissions which will be mounted
// into the Humio container and used by Humio's configuration option READ_GROUP_PERMISSIONS_FROM_FILE
- func (r *HumioClusterReconciler) ensureRolePermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster) error {
- rolePermissionsConfigMapData := rolePermissionsOrDefault(hc)
+ func (r *HumioClusterReconciler) ensureRolePermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error {
+ rolePermissionsConfigMapData := hnp.GetRolePermissions()
if rolePermissionsConfigMapData == "" {
- rolePermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, RolePermissionsConfigMapName(hc), hc.Namespace)
- if err == nil {
- // TODO: refactor and move deletion to cleanupUnusedResources
- if err = r.Delete(ctx, &rolePermissionsConfigMap); err != nil {
- return fmt.Errorf("unable to delete role permissions configmap")
- }
- }
return nil
}

desiredConfigMap := kubernetes.ConstructRolePermissionsConfigMap(
- RolePermissionsConfigMapName(hc),
+ hnp.GetRolePermissionsConfigMapName(),
RolePermissionsFilename,
rolePermissionsConfigMapData,
hc.Name,
@@ -624,7 +603,7 @@ func (r *HumioClusterReconciler) ensureRolePermissionsConfigMap(ctx context.Cont
return r.logErrorAndReturn(err, "could not set controller reference")
}

- existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, RolePermissionsConfigMapName(hc), hc.Namespace)
+ existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetRolePermissionsConfigMapName(), hc.Namespace)
if err != nil {
if k8serrors.IsNotFound(err) {
r.Log.Info(fmt.Sprintf("creating configMap: %s", desiredConfigMap.Name))
@@ -2292,11 +2271,47 @@ func (r *HumioClusterReconciler) verifyHumioClusterConfigurationIsValid(ctx cont
}

func (r *HumioClusterReconciler) cleanupUnusedResources(ctx context.Context, hc *humiov1alpha1.HumioCluster, humioNodePools HumioNodePoolList) (reconcile.Result, error) {
- for _, pool := range humioNodePools.Items {
- if err := r.ensureOrphanedPvcsAreDeleted(ctx, hc, pool); err != nil {
+ for _, hnp := range humioNodePools.Items {
+ if err := r.ensureOrphanedPvcsAreDeleted(ctx, hc, hnp); err != nil {
return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
withMessage(err.Error()))
}

+ if hnp.GetExtraKafkaConfigs() == "" {
+ extraKafkaConfigsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetExtraKafkaConfigsConfigMapName(), hc.Namespace)
+ if err == nil {
+ if err = r.Delete(ctx, &extraKafkaConfigsConfigMap); err != nil {
+ return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
+ withMessage(err.Error()))
+ }
+ }
+ }
}

+ for _, hnp := range humioNodePools.Items {
+ if hnp.GetViewGroupPermissions() == "" {
+ viewGroupPermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetViewGroupPermissionsConfigMapName(), hc.Namespace)
+ if err == nil {
+ if err = r.Delete(ctx, &viewGroupPermissionsConfigMap); err != nil {
+ return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
+ withMessage(err.Error()))
+ }
+ break // only need to delete it once, since all node pools reference the same underlying configmap
+ }
+ }
+ }

+ for _, hnp := range humioNodePools.Items {
+ if hnp.GetRolePermissions() == "" {
+ rolePermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetRolePermissionsConfigMapName(), hc.Namespace)
+ if err == nil {
+ if err = r.Delete(ctx, &rolePermissionsConfigMap); err != nil {
+ return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions().
+ withMessage(err.Error()))
+ }
+ break // only need to delete it once, since all node pools reference the same underlying configmap
+ }
+ }
+ }

for _, nodePool := range humioNodePools.Filter(NodePoolFilterDoesNotHaveNodes) {
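The three cleanup blocks added to cleanupUnusedResources share one shape: when the node pool no longer declares the data, delete the corresponding configmap. A possible follow-up refactor is sketched below; this helper is hypothetical and not part of the commit, but it reuses only calls already visible in this diff:

// Hypothetical helper for the controllers package (not in this commit):
// delete a configmap once the data backing it is gone from the spec.
func (r *HumioClusterReconciler) deleteConfigMapIfUnused(ctx context.Context, hc *humiov1alpha1.HumioCluster, data string, configMapName string) error {
	if data != "" {
		return nil // configmap still in use
	}
	configMap, err := kubernetes.GetConfigMap(ctx, r, configMapName, hc.Namespace)
	if err != nil {
		return nil // nothing found to delete; the inline blocks above behave the same way
	}
	return r.Delete(ctx, &configMap)
}

Each loop body would then reduce to a single call such as r.deleteConfigMapIfUnused(ctx, hc, hnp.GetExtraKafkaConfigs(), hnp.GetExtraKafkaConfigsConfigMapName()).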
16 changes: 0 additions & 16 deletions controllers/humiocluster_defaults.go
@@ -881,22 +881,6 @@ func (hnp *HumioNodePool) GetNodePoolFeatureAllowedAPIRequestTypes() []string {
return []string{NodePoolFeatureAllowedAPIRequestType}
}

- func viewGroupPermissionsOrDefault(hc *humiov1alpha1.HumioCluster) string {
- return hc.Spec.ViewGroupPermissions
- }
-
- func ViewGroupPermissionsConfigMapName(hc *humiov1alpha1.HumioCluster) string {
- return fmt.Sprintf("%s-%s", hc.Name, viewGroupPermissionsConfigMapNameSuffix)
- }
-
- func rolePermissionsOrDefault(hc *humiov1alpha1.HumioCluster) string {
- return hc.Spec.RolePermissions
- }
-
- func RolePermissionsConfigMapName(hc *humiov1alpha1.HumioCluster) string {
- return fmt.Sprintf("%s-%s", hc.Name, rolePermissionsConfigMapNameSuffix)
- }

func AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVars []corev1.EnvVar, defaultEnvVar corev1.EnvVar) []corev1.EnvVar {
for _, envVar := range envVars {
if envVar.Name == defaultEnvVar.Name {
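These removed package-level helpers are superseded by methods on HumioNodePool (the GetViewGroupPermissions, GetRolePermissions, and configmap-name getters called throughout the controller changes above). Their implementations are not shown in this diff; a sketch of what they presumably look like, assuming HumioNodePool exposes the cluster name via GetClusterName() and the suffix constants are unchanged:

// Sketch (assumed, not shown in this diff): replacement methods on HumioNodePool,
// preserving the "<cluster-name>-<suffix>" naming of the removed helpers.
func (hnp *HumioNodePool) GetViewGroupPermissionsConfigMapName() string {
	return fmt.Sprintf("%s-%s", hnp.GetClusterName(), viewGroupPermissionsConfigMapNameSuffix)
}

func (hnp *HumioNodePool) GetRolePermissionsConfigMapName() string {
	return fmt.Sprintf("%s-%s", hnp.GetClusterName(), rolePermissionsConfigMapNameSuffix)
}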
30 changes: 18 additions & 12 deletions controllers/suite/clusters/humiocluster_controller_test.go
@@ -2993,7 +2993,7 @@ var _ = Describe("HumioCluster Controller", func() {

suite.UsingClusterBy(key.Name, "Confirming config map was created")
Eventually(func() error {
- _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), toCreate.Namespace)
+ _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), toCreate.Namespace)
return err
}, testTimeout, suite.TestInterval).Should(Succeed())

@@ -3017,7 +3017,7 @@
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: controllers.ViewGroupPermissionsConfigMapName(toCreate),
+ Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(),
},
DefaultMode: &mode,
},
@@ -3026,7 +3026,7 @@
}

suite.UsingClusterBy(key.Name, "Confirming config map contains desired view group permissions")
- configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), key.Namespace)
+ configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), key.Namespace)
Expect(configMap.Data[controllers.ViewGroupPermissionsFilename]).To(Equal(toCreate.Spec.ViewGroupPermissions))

var updatedHumioCluster humiov1alpha1.HumioCluster
@@ -3051,7 +3051,7 @@
}, testTimeout, suite.TestInterval).Should(Succeed())

Eventually(func() string {
- configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), key.Namespace)
+ configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), key.Namespace)
return configMap.Data[controllers.ViewGroupPermissionsFilename]
}, testTimeout, suite.TestInterval).Should(Equal(updatedViewGroupPermissions))

@@ -3105,7 +3105,7 @@ var _ = Describe("HumioCluster Controller", func() {
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: controllers.ViewGroupPermissionsConfigMapName(toCreate),
+ Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(),
},
DefaultMode: &mode,
},
Expand All @@ -3114,7 +3114,10 @@ var _ = Describe("HumioCluster Controller", func() {

suite.UsingClusterBy(key.Name, "Confirming config map was cleaned up")
Eventually(func() bool {
- _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), toCreate.Namespace)
+ clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
+ _ = suite.MarkPodsAsRunningIfUsingEnvtest(ctx, k8sClient, clusterPods, key.Name)
+
+ _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), toCreate.Namespace)
return k8serrors.IsNotFound(err)
}, testTimeout, suite.TestInterval).Should(BeTrue())
})
@@ -3188,7 +3191,7 @@ var _ = Describe("HumioCluster Controller", func() {

suite.UsingClusterBy(key.Name, "Confirming config map was created")
Eventually(func() error {
- _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), toCreate.Namespace)
+ _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), toCreate.Namespace)
return err
}, testTimeout, suite.TestInterval).Should(Succeed())

@@ -3212,7 +3215,7 @@ var _ = Describe("HumioCluster Controller", func() {
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: controllers.RolePermissionsConfigMapName(toCreate),
+ Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(),
},
DefaultMode: &mode,
},
Expand All @@ -3221,7 +3224,7 @@ var _ = Describe("HumioCluster Controller", func() {
}

suite.UsingClusterBy(key.Name, "Confirming config map contains desired role permissions")
- configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), key.Namespace)
+ configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), key.Namespace)
Expect(configMap.Data[controllers.RolePermissionsFilename]).To(Equal(toCreate.Spec.RolePermissions))

var updatedHumioCluster humiov1alpha1.HumioCluster
@@ -3289,7 +3292,7 @@ var _ = Describe("HumioCluster Controller", func() {
}, testTimeout, suite.TestInterval).Should(Succeed())

Eventually(func() string {
- configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), key.Namespace)
+ configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), key.Namespace)
return configMap.Data[controllers.RolePermissionsFilename]
}, testTimeout, suite.TestInterval).Should(Equal(updatedRolePermissions))

@@ -3343,7 +3346,7 @@ var _ = Describe("HumioCluster Controller", func() {
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: controllers.RolePermissionsConfigMapName(toCreate),
+ Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(),
},
DefaultMode: &mode,
},
Expand All @@ -3352,7 +3355,10 @@ var _ = Describe("HumioCluster Controller", func() {

suite.UsingClusterBy(key.Name, "Confirming config map was cleaned up")
Eventually(func() bool {
- _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), toCreate.Namespace)
+ clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
+ _ = suite.MarkPodsAsRunningIfUsingEnvtest(ctx, k8sClient, clusterPods, key.Name)
+
+ _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), toCreate.Namespace)
return k8serrors.IsNotFound(err)
}, testTimeout, suite.TestInterval).Should(BeTrue())
})
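Both cleanup tests gained the same two lines because configmap deletion moved out of the ensure* functions and into cleanupUnusedResources, and the reconciler only reaches that step once pods report Running. Under envtest no kubelet updates pod status, so each assertion keeps marking pods as running while it polls. The ordering this implies, condensed as an illustration:

// Illustration only: the reconcile ordering implied by this commit. Step
// names are approximate, not the controller's actual function list.
func impliedReconcileOrder() []string {
	return []string{
		"ensureExtraKafkaConfigsConfigMap (per node pool)",
		"ensureViewGroupPermissionsConfigMap (per node pool, moved by this commit)",
		"ensureRolePermissionsConfigMap (per node pool, moved by this commit)",
		"pod creation and status checks",
		"cleanupUnusedResources (now owns deletion of stale configmaps)",
	}
}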
