diff --git a/api/v1alpha1/humiocluster_types.go b/api/v1alpha1/humiocluster_types.go index fd6c525a..fe3a4847 100644 --- a/api/v1alpha1/humiocluster_types.go +++ b/api/v1alpha1/humiocluster_types.go @@ -203,7 +203,10 @@ type HumioNodeSpec struct { // Deprecated: LogScale 1.70.0 deprecated this option, and was later removed in LogScale 1.80.0 NodeUUIDPrefix string `json:"nodeUUIDPrefix,omitempty"` - // ExtraKafkaConfigs is a multi-line string containing kafka properties + // ExtraKafkaConfigs is a multi-line string containing kafka properties. + // Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of + // LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection + // of new environment variables. For more details, see the LogScale release notes. ExtraKafkaConfigs string `json:"extraKafkaConfigs,omitempty"` // ExtraHumioVolumeMounts is the list of additional volume mounts that will be added to the Humio container diff --git a/charts/humio-operator/crds/core.humio.com_humioclusters.yaml b/charts/humio-operator/crds/core.humio.com_humioclusters.yaml index acf1a251..e0b0532f 100644 --- a/charts/humio-operator/crds/core.humio.com_humioclusters.yaml +++ b/charts/humio-operator/crds/core.humio.com_humioclusters.yaml @@ -3815,8 +3815,11 @@ spec: type: object type: array extraKafkaConfigs: - description: ExtraKafkaConfigs is a multi-line string containing kafka - properties + description: |- + ExtraKafkaConfigs is a multi-line string containing kafka properties. + Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of + LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection + of new environment variables. For more details, see the LogScale release notes. 
type: string extraVolumes: description: ExtraVolumes is the list of additional volumes that will @@ -9393,8 +9396,11 @@ spec: type: object type: array extraKafkaConfigs: - description: ExtraKafkaConfigs is a multi-line string containing - kafka properties + description: |- + ExtraKafkaConfigs is a multi-line string containing kafka properties. + Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of + LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection + of new environment variables. For more details, see the LogScale release notes. type: string extraVolumes: description: ExtraVolumes is the list of additional volumes diff --git a/config/crd/bases/core.humio.com_humioclusters.yaml b/config/crd/bases/core.humio.com_humioclusters.yaml index acf1a251..e0b0532f 100644 --- a/config/crd/bases/core.humio.com_humioclusters.yaml +++ b/config/crd/bases/core.humio.com_humioclusters.yaml @@ -3815,8 +3815,11 @@ spec: type: object type: array extraKafkaConfigs: - description: ExtraKafkaConfigs is a multi-line string containing kafka - properties + description: |- + ExtraKafkaConfigs is a multi-line string containing kafka properties. + Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of + LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection + of new environment variables. For more details, see the LogScale release notes. type: string extraVolumes: description: ExtraVolumes is the list of additional volumes that will @@ -9393,8 +9396,11 @@ spec: type: object type: array extraKafkaConfigs: - description: ExtraKafkaConfigs is a multi-line string containing - kafka properties + description: |- + ExtraKafkaConfigs is a multi-line string containing kafka properties. 
+ Deprecated: The underlying LogScale environment variable used by this field has been marked deprecated as of + LogScale 1.173.0. Going forward, it is possible to provide additional Kafka configuration through a collection + of new environment variables. For more details, see the LogScale release notes. type: string extraVolumes: description: ExtraVolumes is the list of additional volumes diff --git a/controllers/humiocluster_controller.go b/controllers/humiocluster_controller.go index 1b4fb0c6..2f093969 100644 --- a/controllers/humiocluster_controller.go +++ b/controllers/humiocluster_controller.go @@ -188,8 +188,6 @@ func (r *HumioClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request r.ensureValidCAIssuer, r.ensureHumioClusterCACertBundle, r.ensureHumioClusterKeystoreSecret, - r.ensureViewGroupPermissionsConfigMap, - r.ensureRolePermissionsConfigMap, r.ensureNoIngressesIfIngressNotEnabled, // TODO: cleanupUnusedResources seems like a better place for this r.ensureIngress, } { @@ -205,6 +203,8 @@ func (r *HumioClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request r.ensureInitContainerPermissions, r.ensureHumioNodeCertificates, r.ensureExtraKafkaConfigsConfigMap, + r.ensureViewGroupPermissionsConfigMap, + r.ensureRolePermissionsConfigMap, } { if err := fun(ctx, hc, pool); err != nil { return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions(). 
@@ -448,13 +448,6 @@ func (r *HumioClusterReconciler) validateNodeCount(hc *humiov1alpha1.HumioCluste func (r *HumioClusterReconciler) ensureExtraKafkaConfigsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error { extraKafkaConfigsConfigMapData := hnp.GetExtraKafkaConfigs() if extraKafkaConfigsConfigMapData == "" { - extraKafkaConfigsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetExtraKafkaConfigsConfigMapName(), hc.Namespace) - if err == nil { - // TODO: refactor and move deletion to cleanupUnusedResources - if err = r.Delete(ctx, &extraKafkaConfigsConfigMap); err != nil { - r.Log.Error(err, "unable to delete extra kafka configs configmap") - } - } return nil } @@ -550,21 +543,14 @@ func (r *HumioClusterReconciler) setImageFromSource(ctx context.Context, hnp *Hu // ensureViewGroupPermissionsConfigMap creates a configmap containing configs specified in viewGroupPermissions which will be mounted // into the Humio container and used by Humio's configuration option READ_GROUP_PERMISSIONS_FROM_FILE -func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster) error { - viewGroupPermissionsConfigMapData := viewGroupPermissionsOrDefault(hc) +func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error { + viewGroupPermissionsConfigMapData := hnp.GetViewGroupPermissions() if viewGroupPermissionsConfigMapData == "" { - viewGroupPermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, ViewGroupPermissionsConfigMapName(hc), hc.Namespace) - if err == nil { - // TODO: refactor and move deletion to cleanupUnusedResources - if err = r.Delete(ctx, &viewGroupPermissionsConfigMap); err != nil { - r.Log.Error(err, "unable to delete view group permissions configmap") - } - } return nil } desiredConfigMap := kubernetes.ConstructViewGroupPermissionsConfigMap( - 
ViewGroupPermissionsConfigMapName(hc), + hnp.GetViewGroupPermissionsConfigMapName(), ViewGroupPermissionsFilename, viewGroupPermissionsConfigMapData, hc.Name, @@ -574,7 +560,7 @@ func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context return r.logErrorAndReturn(err, "could not set controller reference") } - existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, ViewGroupPermissionsConfigMapName(hc), hc.Namespace) + existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetViewGroupPermissionsConfigMapName(), hc.Namespace) if err != nil { if k8serrors.IsNotFound(err) { r.Log.Info(fmt.Sprintf("creating configMap: %s", desiredConfigMap.Name)) @@ -600,21 +586,14 @@ func (r *HumioClusterReconciler) ensureViewGroupPermissionsConfigMap(ctx context // ensureRolePermissionsConfigMap creates a configmap containing configs specified in rolePermissions which will be mounted // into the Humio container and used by Humio's configuration option READ_GROUP_PERMISSIONS_FROM_FILE -func (r *HumioClusterReconciler) ensureRolePermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster) error { - rolePermissionsConfigMapData := rolePermissionsOrDefault(hc) +func (r *HumioClusterReconciler) ensureRolePermissionsConfigMap(ctx context.Context, hc *humiov1alpha1.HumioCluster, hnp *HumioNodePool) error { + rolePermissionsConfigMapData := hnp.GetRolePermissions() if rolePermissionsConfigMapData == "" { - rolePermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, RolePermissionsConfigMapName(hc), hc.Namespace) - if err == nil { - // TODO: refactor and move deletion to cleanupUnusedResources - if err = r.Delete(ctx, &rolePermissionsConfigMap); err != nil { - return fmt.Errorf("unable to delete role permissions configmap") - } - } return nil } desiredConfigMap := kubernetes.ConstructRolePermissionsConfigMap( - RolePermissionsConfigMapName(hc), + hnp.GetRolePermissionsConfigMapName(), RolePermissionsFilename, rolePermissionsConfigMapData, 
hc.Name, @@ -624,7 +603,7 @@ func (r *HumioClusterReconciler) ensureRolePermissionsConfigMap(ctx context.Cont return r.logErrorAndReturn(err, "could not set controller reference") } - existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, RolePermissionsConfigMapName(hc), hc.Namespace) + existingConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetRolePermissionsConfigMapName(), hc.Namespace) if err != nil { if k8serrors.IsNotFound(err) { r.Log.Info(fmt.Sprintf("creating configMap: %s", desiredConfigMap.Name)) @@ -2292,11 +2271,47 @@ func (r *HumioClusterReconciler) verifyHumioClusterConfigurationIsValid(ctx cont } func (r *HumioClusterReconciler) cleanupUnusedResources(ctx context.Context, hc *humiov1alpha1.HumioCluster, humioNodePools HumioNodePoolList) (reconcile.Result, error) { - for _, pool := range humioNodePools.Items { - if err := r.ensureOrphanedPvcsAreDeleted(ctx, hc, pool); err != nil { + for _, hnp := range humioNodePools.Items { + if err := r.ensureOrphanedPvcsAreDeleted(ctx, hc, hnp); err != nil { return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions(). withMessage(err.Error())) } + + if hnp.GetExtraKafkaConfigs() == "" { + extraKafkaConfigsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetExtraKafkaConfigsConfigMapName(), hc.Namespace) + if err == nil { + if err = r.Delete(ctx, &extraKafkaConfigsConfigMap); err != nil { + return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions(). + withMessage(err.Error())) + } + } + } + } + + for _, hnp := range humioNodePools.Items { + if hnp.GetViewGroupPermissions() == "" { + viewGroupPermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetViewGroupPermissionsConfigMapName(), hc.Namespace) + if err == nil { + if err = r.Delete(ctx, &viewGroupPermissionsConfigMap); err != nil { + return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions(). 
+ withMessage(err.Error())) + } + break // only need to delete it once, since all node pools reference the same underlying configmap + } + } + } + + for _, hnp := range humioNodePools.Items { + if hnp.GetRolePermissions() == "" { + rolePermissionsConfigMap, err := kubernetes.GetConfigMap(ctx, r, hnp.GetRolePermissionsConfigMapName(), hc.Namespace) + if err == nil { + if err = r.Delete(ctx, &rolePermissionsConfigMap); err != nil { + return r.updateStatus(ctx, r.Client.Status(), hc, statusOptions(). + withMessage(err.Error())) + } + break // only need to delete it once, since all node pools reference the same underlying configmap + } + } } for _, nodePool := range humioNodePools.Filter(NodePoolFilterDoesNotHaveNodes) { diff --git a/controllers/humiocluster_defaults.go b/controllers/humiocluster_defaults.go index a3e69010..69162563 100644 --- a/controllers/humiocluster_defaults.go +++ b/controllers/humiocluster_defaults.go @@ -881,22 +881,6 @@ func (hnp *HumioNodePool) GetNodePoolFeatureAllowedAPIRequestTypes() []string { return []string{NodePoolFeatureAllowedAPIRequestType} } -func viewGroupPermissionsOrDefault(hc *humiov1alpha1.HumioCluster) string { - return hc.Spec.ViewGroupPermissions -} - -func ViewGroupPermissionsConfigMapName(hc *humiov1alpha1.HumioCluster) string { - return fmt.Sprintf("%s-%s", hc.Name, viewGroupPermissionsConfigMapNameSuffix) -} - -func rolePermissionsOrDefault(hc *humiov1alpha1.HumioCluster) string { - return hc.Spec.RolePermissions -} - -func RolePermissionsConfigMapName(hc *humiov1alpha1.HumioCluster) string { - return fmt.Sprintf("%s-%s", hc.Name, rolePermissionsConfigMapNameSuffix) -} - func AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVars []corev1.EnvVar, defaultEnvVar corev1.EnvVar) []corev1.EnvVar { for _, envVar := range envVars { if envVar.Name == defaultEnvVar.Name { diff --git a/controllers/suite/clusters/humiocluster_controller_test.go b/controllers/suite/clusters/humiocluster_controller_test.go index 7482848e..157c620c 100644 
--- a/controllers/suite/clusters/humiocluster_controller_test.go +++ b/controllers/suite/clusters/humiocluster_controller_test.go @@ -2993,7 +2993,7 @@ var _ = Describe("HumioCluster Controller", func() { suite.UsingClusterBy(key.Name, "Confirming config map was created") Eventually(func() error { - _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), toCreate.Namespace) + _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), toCreate.Namespace) return err }, testTimeout, suite.TestInterval).Should(Succeed()) @@ -3017,7 +3017,7 @@ var _ = Describe("HumioCluster Controller", func() { VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: controllers.ViewGroupPermissionsConfigMapName(toCreate), + Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), }, DefaultMode: &mode, }, @@ -3026,7 +3026,7 @@ var _ = Describe("HumioCluster Controller", func() { } suite.UsingClusterBy(key.Name, "Confirming config map contains desired view group permissions") - configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), key.Namespace) + configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), key.Namespace) Expect(configMap.Data[controllers.ViewGroupPermissionsFilename]).To(Equal(toCreate.Spec.ViewGroupPermissions)) var updatedHumioCluster humiov1alpha1.HumioCluster @@ -3051,7 +3051,7 @@ var _ = Describe("HumioCluster Controller", func() { }, testTimeout, suite.TestInterval).Should(Succeed()) Eventually(func() string { - configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), key.Namespace) + configMap, _ 
:= kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), key.Namespace) return configMap.Data[controllers.ViewGroupPermissionsFilename] }, testTimeout, suite.TestInterval).Should(Equal(updatedViewGroupPermissions)) @@ -3105,7 +3105,7 @@ var _ = Describe("HumioCluster Controller", func() { VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: controllers.ViewGroupPermissionsConfigMapName(toCreate), + Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), }, DefaultMode: &mode, }, @@ -3114,7 +3114,10 @@ var _ = Describe("HumioCluster Controller", func() { suite.UsingClusterBy(key.Name, "Confirming config map was cleaned up") Eventually(func() bool { - _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), toCreate.Namespace) + clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels()) + _ = suite.MarkPodsAsRunningIfUsingEnvtest(ctx, k8sClient, clusterPods, key.Name) + + _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetViewGroupPermissionsConfigMapName(), toCreate.Namespace) return k8serrors.IsNotFound(err) }, testTimeout, suite.TestInterval).Should(BeTrue()) }) @@ -3188,7 +3191,7 @@ var _ = Describe("HumioCluster Controller", func() { suite.UsingClusterBy(key.Name, "Confirming config map was created") Eventually(func() error { - _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), toCreate.Namespace) + _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), toCreate.Namespace) return err }, testTimeout, 
suite.TestInterval).Should(Succeed()) @@ -3212,7 +3215,7 @@ var _ = Describe("HumioCluster Controller", func() { VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: controllers.RolePermissionsConfigMapName(toCreate), + Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), }, DefaultMode: &mode, }, @@ -3221,7 +3224,7 @@ var _ = Describe("HumioCluster Controller", func() { } suite.UsingClusterBy(key.Name, "Confirming config map contains desired role permissions") - configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), key.Namespace) + configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), key.Namespace) Expect(configMap.Data[controllers.RolePermissionsFilename]).To(Equal(toCreate.Spec.RolePermissions)) var updatedHumioCluster humiov1alpha1.HumioCluster @@ -3289,7 +3292,7 @@ var _ = Describe("HumioCluster Controller", func() { }, testTimeout, suite.TestInterval).Should(Succeed()) Eventually(func() string { - configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), key.Namespace) + configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), key.Namespace) return configMap.Data[controllers.RolePermissionsFilename] }, testTimeout, suite.TestInterval).Should(Equal(updatedRolePermissions)) @@ -3343,7 +3346,7 @@ var _ = Describe("HumioCluster Controller", func() { VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: controllers.RolePermissionsConfigMapName(toCreate), + Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), }, DefaultMode: 
&mode, }, @@ -3352,7 +3355,10 @@ var _ = Describe("HumioCluster Controller", func() { suite.UsingClusterBy(key.Name, "Confirming config map was cleaned up") Eventually(func() bool { - _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.RolePermissionsConfigMapName(toCreate), toCreate.Namespace) + clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels()) + _ = suite.MarkPodsAsRunningIfUsingEnvtest(ctx, k8sClient, clusterPods, key.Name) + + _, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetRolePermissionsConfigMapName(), toCreate.Namespace) return k8serrors.IsNotFound(err) }, testTimeout, suite.TestInterval).Should(BeTrue()) })