From 3c065b9cdfcb6995c5792c758885e0f23a500c73 Mon Sep 17 00:00:00 2001 From: Joel Takvorian Date: Wed, 4 Oct 2023 10:32:18 +0200 Subject: [PATCH] NETOBSERV-1284 implement metrics white-listing - Introduce CRD field processor.metrics.includeList - Deprecate CRD field processor.metrics.ignoreTags - Convert ignoreTags to includeList approach when includeList isn't set - If includeList isn't set and ignoreTags == default tags in 1.4, default includeList will be used. This should allow a smooth transitioning if more metrics are added in 1.5 - Some code refactoring to move away from embedded metrics defs - this will help to prepare exposing the metrics creation API, and avoid having to define every metric permutation one by one (egress/ingress, bytes/packets, node/ns/workload) - Fixing an issue with the Health dashboard not showing some metrics (previously tagged as "flows") despite they are always present disambiguate package name Fix include inner direction in metrics Rebased / adapt to v1beta2 - In Conversion webhooks, use the per-field dedicated functions to integrate conversion logic - Add tests on conversion webhooks - Automate generation of hack CRD - Modify Health dashboard tagging: just a single tag "dynamic" is sufficient to tell whether we need to check for metric included --- api/v1alpha1/flowcollector_webhook.go | 16 ++ api/v1alpha1/zz_generated.conversion.go | 19 +-- api/v1beta1/flowcollector_types.go | 17 +- api/v1beta1/flowcollector_webhook.go | 28 ++- api/v1beta1/flowcollector_webhook_test.go | 160 ++++++++++++++++++ api/v1beta1/zz_generated.conversion.go | 20 +-- api/v1beta1/zz_generated.deepcopy.go | 9 + api/v1beta2/flowcollector_types.go | 15 +- api/v1beta2/zz_generated.deepcopy.go | 12 +- .../flows.netobserv.io_flowcollectors.yaml | 55 +++--- ...observ-operator.clusterserviceversion.yaml | 24 +-- .../flows.netobserv.io_flowcollectors.yaml | 55 +++--- .../samples/flows_v1beta1_flowcollector.yaml | 13 +- .../samples/flows_v1beta2_flowcollector.yaml | 
17 +- controllers/flowcollector_controller.go | 5 +- .../flowcollector_controller_iso_test.go | 1 - controllers/flowcollector_controller_test.go | 18 +- controllers/flowcollector_objects.go | 15 +- .../flowlogspipeline/flp_common_objects.go | 54 +----- controllers/flowlogspipeline/flp_test.go | 48 +++--- .../namespace_egress_bytes_total.yaml | 26 --- .../namespace_egress_packets_total.yaml | 26 --- .../namespace_flows_total.yaml | 20 --- .../namespace_ingress_bytes_total.yaml | 26 --- .../namespace_ingress_packets_total.yaml | 26 --- .../node_egress_bytes_total.yaml | 26 --- .../node_egress_packets_total.yaml | 26 --- .../metrics_definitions/node_flows_total.yaml | 20 --- .../node_ingress_bytes_total.yaml | 26 --- .../node_ingress_packets_total.yaml | 26 --- .../workload_egress_bytes_total.yaml | 30 ---- .../workload_egress_packets_total.yaml | 30 ---- .../workload_flows_total.yaml | 24 --- .../workload_ingress_bytes_total.yaml | 30 ---- .../workload_ingress_packets_total.yaml | 30 ---- docs/FlowCollector.md | 15 +- ...ned.flows.netobserv.io_flowcollectors.yaml | 18 +- pkg/{helper => dashboards}/dashboard.go | 74 ++------ pkg/{helper => dashboards}/dashboard_test.go | 117 +++++++------ pkg/dashboards/health.go | 77 +++++++++ .../dashboards}/infra_health_dashboard.json | 25 +-- pkg/helper/flowcollector.go | 8 + pkg/metrics/predefined_metrics.go | 133 +++++++++++++++ pkg/metrics/predefined_metrics_test.go | 43 +++++ pkg/test/dashboards.go | 33 ++++ 45 files changed, 782 insertions(+), 754 deletions(-) create mode 100644 api/v1beta1/flowcollector_webhook_test.go delete mode 100644 controllers/flowlogspipeline/metrics_definitions/namespace_egress_bytes_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/namespace_egress_packets_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/namespace_flows_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/namespace_ingress_bytes_total.yaml delete 
mode 100644 controllers/flowlogspipeline/metrics_definitions/namespace_ingress_packets_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/node_egress_bytes_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/node_egress_packets_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/node_flows_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/node_ingress_bytes_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/node_ingress_packets_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/workload_egress_bytes_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/workload_egress_packets_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/workload_flows_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/workload_ingress_bytes_total.yaml delete mode 100644 controllers/flowlogspipeline/metrics_definitions/workload_ingress_packets_total.yaml rename pkg/{helper => dashboards}/dashboard.go (79%) rename pkg/{helper => dashboards}/dashboard_test.go (78%) create mode 100644 pkg/dashboards/health.go rename {controllers => pkg/dashboards}/infra_health_dashboard.json (98%) create mode 100644 pkg/metrics/predefined_metrics.go create mode 100644 pkg/metrics/predefined_metrics_test.go create mode 100644 pkg/test/dashboards.go diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index d4d96e4e7..45dabf072 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -22,6 +22,7 @@ import ( "github.com/netobserv/network-observability-operator/api/v1beta2" utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/metrics" v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -73,6 +74,12 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { // Loki dst.Spec.Loki.Enable = restored.Spec.Loki.Enable + if restored.Spec.Processor.Metrics.IncludeList != nil { + list := make([]string, len(*restored.Spec.Processor.Metrics.IncludeList)) + copy(list, *restored.Spec.Processor.Metrics.IncludeList) + dst.Spec.Processor.Metrics.IncludeList = &list + } + return nil } @@ -172,3 +179,12 @@ func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2 func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error { return autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) } + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have new defined fields in v1beta2 not in v1beta1 +// nolint:golint,stylecheck,revive +func Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s apiconversion.Scope) error { + includeList := metrics.GetEnabledNames(in.IgnoreTags, nil) + out.IncludeList = &includeList + return autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) +} diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index ca75958fa..7f3b86a99 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -88,11 +88,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) - }); err != nil { - return err - } if err := 
s.AddGeneratedConversionFunc((*FileReference)(nil), (*v1beta2.FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_FileReference_To_v1beta2_FileReference(a.(*FileReference), b.(*v1beta2.FileReference), scope) }); err != nil { @@ -263,6 +258,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) }); err != nil { @@ -433,20 +433,15 @@ func autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out * if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } - out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + // WARNING: in.IgnoreTags requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics is an autogenerated conversion function. 
-func Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { - return autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) -} - func autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { if err := Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } - out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + // WARNING: in.IncludeList requires manual conversion: does not exist in peer-type // WARNING: in.DisableAlerts requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index cf7254da1..18c48d90f 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -344,13 +344,24 @@ type FLPMetrics struct { // +optional Server MetricsServerConfig `json:"server,omitempty"` - // `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . + // `ignoreTags` [deprecated (*)] is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . // Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. - // Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity). 
- //+kubebuilder:default:={"egress","packets","nodes-flows","namespaces-flows","workloads-flows","namespaces"} + // Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).
+ // Deprecation notice: use `includeList` instead. + // +kubebuilder:default:={"egress","packets","nodes-flows","namespaces-flows","workloads-flows","namespaces"} // +optional IgnoreTags []string `json:"ignoreTags"` + // `includeList` is a list of metric names to specify which metrics to generate. + // The names correspond to the name in Prometheus, without the prefix. For example, + // `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. + // Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, + // `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, + // `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, + // `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`. + // +optional + IncludeList *[]string `json:"includeList,omitempty"` + // `disableAlerts` is a list of alerts that should be disabled. // Possible values are:
// `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go index 585fe20ee..00885512c 100644 --- a/api/v1beta1/flowcollector_webhook.go +++ b/api/v1beta1/flowcollector_webhook.go @@ -22,6 +22,7 @@ import ( "github.com/netobserv/network-observability-operator/api/v1beta2" utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/metrics" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -79,13 +80,6 @@ func Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.Fl return autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s) } -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1beta1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { - return autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s) -} - // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta2 not in v1beta1 // nolint:golint,stylecheck,revive @@ -129,3 +123,23 @@ func Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowColl } return autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s) } + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have new defined fields in v1beta2 not in v1beta1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { + return 
autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s) +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have new defined fields in v1beta2 not in v1beta1 +// nolint:golint,stylecheck,revive +func Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s apiconversion.Scope) error { + err := autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) + if err != nil { + return err + } + includeList := metrics.GetEnabledNames(in.IgnoreTags, in.IncludeList) + out.IncludeList = &includeList + return nil +} diff --git a/api/v1beta1/flowcollector_webhook_test.go b/api/v1beta1/flowcollector_webhook_test.go new file mode 100644 index 000000000..1617dae0a --- /dev/null +++ b/api/v1beta1/flowcollector_webhook_test.go @@ -0,0 +1,160 @@ +package v1beta1 + +import ( + "testing" + + "github.com/netobserv/network-observability-operator/api/v1beta2" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func TestBeta1ConversionRoundtrip_Loki(t *testing.T) { + // Testing beta1 -> beta2 -> beta1 + assert := assert.New(t) + + initial := FlowCollector{ + Spec: FlowCollectorSpec{ + Loki: FlowCollectorLoki{ + Enable: ptr.To(true), + URL: "http://loki", + StatusURL: "http://loki/status", + QuerierURL: "http://loki/querier", + TenantID: "tenant", + AuthToken: LokiAuthForwardUserToken, + TLS: ClientTLS{ + Enable: true, + InsecureSkipVerify: true, + }, + StatusTLS: ClientTLS{ + Enable: true, + InsecureSkipVerify: true, + }, + BatchSize: 1000, + }, + }, + } + + var converted v1beta2.FlowCollector + err := initial.ConvertTo(&converted) + assert.NoError(err) + + assert.Equal(v1beta2.LokiModeManual, converted.Spec.Loki.Mode) + assert.True(*converted.Spec.Loki.Enable) + assert.Equal("http://loki", converted.Spec.Loki.Manual.IngesterURL) + assert.Equal("http://loki/status", converted.Spec.Loki.Manual.StatusURL) + assert.Equal("http://loki/querier", 
converted.Spec.Loki.Manual.QuerierURL) + assert.Equal("tenant", converted.Spec.Loki.Manual.TenantID) + assert.Equal(LokiAuthForwardUserToken, converted.Spec.Loki.Manual.AuthToken) + assert.True(converted.Spec.Loki.Manual.TLS.Enable) + assert.True(converted.Spec.Loki.Manual.TLS.InsecureSkipVerify) + assert.True(converted.Spec.Loki.Manual.StatusTLS.Enable) + assert.True(converted.Spec.Loki.Manual.StatusTLS.InsecureSkipVerify) + + // Other way + var back FlowCollector + err = back.ConvertFrom(&converted) + assert.NoError(err) + assert.Equal(initial.Spec.Loki, back.Spec.Loki) +} + +func TestBeta2ConversionRoundtrip_Loki(t *testing.T) { + // Testing beta2 -> beta1 -> beta2 + assert := assert.New(t) + + initial := v1beta2.FlowCollector{ + Spec: v1beta2.FlowCollectorSpec{ + Loki: v1beta2.FlowCollectorLoki{ + Enable: ptr.To(true), + Mode: v1beta2.LokiModeLokiStack, + LokiStack: v1beta2.LokiStackRef{ + Name: "lokiii", + Namespace: "lokins", + }, + BatchSize: 1000, + }, + }, + } + + var converted FlowCollector + err := converted.ConvertFrom(&initial) + assert.NoError(err) + + assert.True(*converted.Spec.Loki.Enable) + assert.Equal("https://lokiii-gateway-http.lokins.svc:8080/api/logs/v1/network/", converted.Spec.Loki.URL) + assert.Equal("https://lokiii-query-frontend-http.lokins.svc:3100/", converted.Spec.Loki.StatusURL) + assert.Equal("https://lokiii-gateway-http.lokins.svc:8080/api/logs/v1/network/", converted.Spec.Loki.QuerierURL) + assert.Equal("network", converted.Spec.Loki.TenantID) + assert.Equal(LokiAuthForwardUserToken, converted.Spec.Loki.AuthToken) + assert.True(converted.Spec.Loki.TLS.Enable) + assert.False(converted.Spec.Loki.TLS.InsecureSkipVerify) + assert.True(converted.Spec.Loki.StatusTLS.Enable) + assert.False(converted.Spec.Loki.StatusTLS.InsecureSkipVerify) + + // Other way + var back v1beta2.FlowCollector + err = converted.ConvertTo(&back) + assert.NoError(err) + assert.Equal(initial.Spec.Loki, back.Spec.Loki) +} + +func 
TestBeta1ConversionRoundtrip_Metrics(t *testing.T) { + // Testing beta1 -> beta2 -> beta1 + assert := assert.New(t) + + initial := FlowCollector{ + Spec: FlowCollectorSpec{ + Processor: FlowCollectorFLP{ + Metrics: FLPMetrics{ + DisableAlerts: []FLPAlert{AlertLokiError}, + IgnoreTags: []string{"nodes", "workloads", "bytes", "ingress"}, + }, + }, + }, + } + + var converted v1beta2.FlowCollector + err := initial.ConvertTo(&converted) + assert.NoError(err) + + assert.Equal([]v1beta2.FLPAlert{v1beta2.AlertLokiError}, converted.Spec.Processor.Metrics.DisableAlerts) + assert.NotNil(converted.Spec.Processor.Metrics.IncludeList) + assert.Equal([]string{"namespace_egress_packets_total", "namespace_flows_total"}, *converted.Spec.Processor.Metrics.IncludeList) + + // Other way + var back FlowCollector + err = back.ConvertFrom(&converted) + assert.NoError(err) + // Here, includeList is preserved; it takes precedence over ignoreTags + assert.Equal([]string{"namespace_egress_packets_total", "namespace_flows_total"}, *back.Spec.Processor.Metrics.IncludeList) + assert.Equal(initial.Spec.Processor.Metrics.DisableAlerts, back.Spec.Processor.Metrics.DisableAlerts) + assert.Equal(initial.Spec.Processor.Metrics.Server, back.Spec.Processor.Metrics.Server) +} + +func TestBeta2ConversionRoundtrip_Metrics(t *testing.T) { + // Testing beta2 -> beta1 -> beta2 + assert := assert.New(t) + + initial := v1beta2.FlowCollector{ + Spec: v1beta2.FlowCollectorSpec{ + Processor: v1beta2.FlowCollectorFLP{ + Metrics: v1beta2.FLPMetrics{ + DisableAlerts: []v1beta2.FLPAlert{v1beta2.AlertLokiError}, + IncludeList: &[]string{"namespace_egress_packets_total", "namespace_flows_total"}, + }, + }, + }, + } + + var converted FlowCollector + err := converted.ConvertFrom(&initial) + assert.NoError(err) + + assert.Equal([]FLPAlert{AlertLokiError}, converted.Spec.Processor.Metrics.DisableAlerts) + assert.NotNil(converted.Spec.Processor.Metrics.IncludeList) + assert.Equal([]string{"namespace_egress_packets_total", 
"namespace_flows_total"}, *converted.Spec.Processor.Metrics.IncludeList) + + var back v1beta2.FlowCollector + err = converted.ConvertTo(&back) + assert.NoError(err) + assert.Equal(initial.Spec.Processor.Metrics, back.Spec.Processor.Metrics) +} diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 602260f54..e150d3944 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -88,11 +88,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FileReference)(nil), (*v1beta2.FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FileReference_To_v1beta2_FileReference(a.(*FileReference), b.(*v1beta2.FileReference), scope) }); err != nil { @@ -278,6 +273,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) }); err != nil { @@ -429,21 +429,17 @@ func autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v if err := 
Convert_v1beta1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } - out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + // WARNING: in.IgnoreTags requires manual conversion: does not exist in peer-type + out.IncludeList = (*[]string)(unsafe.Pointer(in.IncludeList)) out.DisableAlerts = *(*[]v1beta2.FLPAlert)(unsafe.Pointer(&in.DisableAlerts)) return nil } -// Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics is an autogenerated conversion function. -func Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { - return autoConvert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) -} - func autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { if err := Convert_v1beta2_MetricsServerConfig_To_v1beta1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { return err } - out.IgnoreTags = *(*[]string)(unsafe.Pointer(&in.IgnoreTags)) + out.IncludeList = (*[]string)(unsafe.Pointer(in.IncludeList)) out.DisableAlerts = *(*[]FLPAlert)(unsafe.Pointer(&in.DisableAlerts)) return nil } diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 09748c294..ee5647448 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -132,6 +132,15 @@ func (in *FLPMetrics) DeepCopyInto(out *FLPMetrics) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.IncludeList != nil { + in, out := &in.IncludeList, &out.IncludeList + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } if in.DisableAlerts != nil { in, out := &in.DisableAlerts, &out.DisableAlerts *out = make([]FLPAlert, len(*in)) diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 1f6932924..a69bb385b 100644 --- a/api/v1beta2/flowcollector_types.go 
+++ b/api/v1beta2/flowcollector_types.go @@ -344,12 +344,15 @@ type FLPMetrics struct { // +optional Server MetricsServerConfig `json:"server,omitempty"` - // `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . - // Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. - // Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity). - //+kubebuilder:default:={"egress","packets","nodes-flows","namespaces-flows","workloads-flows","namespaces"} - // +optional - IgnoreTags []string `json:"ignoreTags"` + // `includeList` is a list of metric names to specify which metrics to generate. + // The names correspond to the name in Prometheus, without the prefix. For example, + // `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. + // Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, + // `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, + // `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, + // `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`. + // +optional + IncludeList *[]string `json:"includeList,omitempty"` // `disableAlerts` is a list of alerts that should be disabled. // Possible values are:
diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 3f25bdcb2..74f820cfc 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -127,10 +127,14 @@ func (in *DebugConfig) DeepCopy() *DebugConfig { func (in *FLPMetrics) DeepCopyInto(out *FLPMetrics) { *out = *in in.Server.DeepCopyInto(&out.Server) - if in.IgnoreTags != nil { - in, out := &in.IgnoreTags, &out.IgnoreTags - *out = make([]string, len(*in)) - copy(*out, *in) + if in.IncludeList != nil { + in, out := &in.IncludeList, &out.IncludeList + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } } if in.DisableAlerts != nil { in, out := &in.DisableAlerts, &out.DisableAlerts diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index e5d2264d3..75d67482e 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -4812,15 +4812,32 @@ spec: - namespaces-flows - workloads-flows - namespaces - description: '`ignoreTags` is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + description: '`ignoreTags` [deprecated (*)] is a list of tags + to specify which metrics to ignore. Each metric is associated + with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. 
Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` - offering a finer granularity).' + offering a finer granularity).
Deprecation notice: use + `includeList` instead.' + items: + type: string + type: array + includeList: + description: '`includeList` is a list of metric names to specify + which metrics to generate. The names correspond to the name + in Prometheus, without the prefix. For example, `namespace_egress_packets_total` + will show up as `netobserv_namespace_egress_packets_total` + in Prometheus. Available names are: `namespace_egress_bytes_total`, + `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, + `namespace_ingress_packets_total`, `namespace_flows_total`, + `node_egress_bytes_total`, `node_egress_packets_total`, + `node_ingress_bytes_total`, `node_ingress_packets_total`, + `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, + `workload_ingress_bytes_total`, `workload_ingress_packets_total`, + `workload_flows_total`.' items: type: string type: array @@ -7686,23 +7703,19 @@ spec: - NetObservLokiError type: string type: array - ignoreTags: - default: - - egress - - packets - - nodes-flows - - namespaces-flows - - workloads-flows - - namespaces - description: '`ignoreTags` is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . Available tags are: `egress`, `ingress`, `flows`, `bytes`, - `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, - `namespaces-flows`, `workloads-flows`. Namespace-based metrics - are covered by both `workloads` and `namespaces` tags, hence - it is recommended to always ignore one of them (`workloads` - offering a finer granularity).' + includeList: + description: '`includeList` is a list of metric names to specify + which metrics to generate. The names correspond to the name + in Prometheus, without the prefix. 
For example, `namespace_egress_packets_total` + will show up as `netobserv_namespace_egress_packets_total` + in Prometheus. Available names are: `namespace_egress_bytes_total`, + `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, + `namespace_ingress_packets_total`, `namespace_flows_total`, + `node_egress_bytes_total`, `node_egress_packets_total`, + `node_ingress_bytes_total`, `node_ingress_packets_total`, + `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, + `workload_ingress_bytes_total`, `workload_ingress_packets_total`, + `workload_flows_total`.' items: type: string type: array diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 4fe79d1a9..3905d16a0 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -325,13 +325,10 @@ metadata: "logTypes": "FLOWS", "metrics": { "disableAlerts": [], - "ignoreTags": [ - "egress", - "packets", - "nodes-flows", - "namespaces-flows", - "workloads-flows", - "namespaces" + "includeList": [ + "node_ingress_bytes_total", + "workload_ingress_bytes_total", + "namespace_flows_total" ], "server": { "port": 9102 @@ -474,7 +471,7 @@ metadata: "maxRetries": 2, "minBackoff": "1s", "mode": "Monolithic", - "monolith": { + "monolithic": { "tenantID": "netobserv", "tls": { "caCert": { @@ -502,13 +499,10 @@ metadata: "logTypes": "FLOWS", "metrics": { "disableAlerts": [], - "ignoreTags": [ - "egress", - "packets", - "nodes-flows", - "namespaces-flows", - "workloads-flows", - "namespaces" + "includeList": [ + "node_ingress_bytes_total", + "workload_ingress_bytes_total", + "namespace_flows_total" ], "server": { "port": 9102 diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index f1b694203..1ecef980a 100644 --- 
a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -4798,15 +4798,32 @@ spec: - namespaces-flows - workloads-flows - namespaces - description: '`ignoreTags` is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions + description: '`ignoreTags` [deprecated (*)] is a list of tags + to specify which metrics to ignore. Each metric is associated + with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` - offering a finer granularity).' + offering a finer granularity).
Deprecation notice: use + `includeList` instead.' + items: + type: string + type: array + includeList: + description: '`includeList` is a list of metric names to specify + which metrics to generate. The names correspond to the name + in Prometheus, without the prefix. For example, `namespace_egress_packets_total` + will show up as `netobserv_namespace_egress_packets_total` + in Prometheus. Available names are: `namespace_egress_bytes_total`, + `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, + `namespace_ingress_packets_total`, `namespace_flows_total`, + `node_egress_bytes_total`, `node_egress_packets_total`, + `node_ingress_bytes_total`, `node_ingress_packets_total`, + `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, + `workload_ingress_bytes_total`, `workload_ingress_packets_total`, + `workload_flows_total`.' items: type: string type: array @@ -7672,23 +7689,19 @@ spec: - NetObservLokiError type: string type: array - ignoreTags: - default: - - egress - - packets - - nodes-flows - - namespaces-flows - - workloads-flows - - namespaces - description: '`ignoreTags` is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . Available tags are: `egress`, `ingress`, `flows`, `bytes`, - `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, - `namespaces-flows`, `workloads-flows`. Namespace-based metrics - are covered by both `workloads` and `namespaces` tags, hence - it is recommended to always ignore one of them (`workloads` - offering a finer granularity).' + includeList: + description: '`includeList` is a list of metric names to specify + which metrics to generate. The names correspond to the name + in Prometheus, without the prefix. 
For example, `namespace_egress_packets_total` + will show up as `netobserv_namespace_egress_packets_total` + in Prometheus. Available names are: `namespace_egress_bytes_total`, + `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, + `namespace_ingress_packets_total`, `namespace_flows_total`, + `node_egress_bytes_total`, `node_egress_packets_total`, + `node_ingress_bytes_total`, `node_ingress_packets_total`, + `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, + `workload_ingress_bytes_total`, `workload_ingress_packets_total`, + `workload_flows_total`.' items: type: string type: array diff --git a/config/samples/flows_v1beta1_flowcollector.yaml b/config/samples/flows_v1beta1_flowcollector.yaml index 8d4007e93..8467d1eed 100644 --- a/config/samples/flows_v1beta1_flowcollector.yaml +++ b/config/samples/flows_v1beta1_flowcollector.yaml @@ -35,16 +35,11 @@ spec: metrics: server: port: 9102 - ignoreTags: - - egress - - packets - - nodes-flows - - namespaces-flows - - workloads-flows - # Ignoring eihter "namespaces" or "workloads", as namespaces metrics are included in workload metrics - # This helps reduce total cardinality - - namespaces disableAlerts: [] + includeList: + - node_ingress_bytes_total + - workload_ingress_bytes_total + - namespace_flows_total dropUnusedFields: true resources: requests: diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml index 41b768b4c..ba65a45e0 100644 --- a/config/samples/flows_v1beta2_flowcollector.yaml +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -35,16 +35,11 @@ spec: metrics: server: port: 9102 - ignoreTags: - - egress - - packets - - nodes-flows - - namespaces-flows - - workloads-flows - # Ignoring eihter "namespaces" or "workloads", as namespaces metrics are included in workload metrics - # This helps reduce total cardinality - - namespaces disableAlerts: [] + includeList: + - node_ingress_bytes_total + - 
workload_ingress_bytes_total + - namespace_flows_total dropUnusedFields: true resources: requests: @@ -80,7 +75,7 @@ spec: enable: true # Change mode to "LokiStack" to use with the loki operator mode: Monolithic - monolith: + monolithic: url: 'http://loki.netobserv.svc:3100/' tenantID: netobserv tls: @@ -146,4 +141,4 @@ spec: # ipfix: # targetHost: "ipfix-collector.ipfix.svc.cluster.local" # targetPort: 4739 - # transport: TCP or UDP (optional - defaults to TCP) \ No newline at end of file + # transport: TCP or UDP (optional - defaults to TCP) diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index c05557c07..2f71794f5 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -332,14 +332,15 @@ func (r *FlowCollectorReconciler) reconcileOperator(ctx context.Context, cmn *re } if r.availableAPIs.HasSvcMonitor() { - desiredFlowDashboardCM, del, err := buildFlowMetricsDashboard(cmn.Namespace, desired.Spec.Processor.Metrics.IgnoreTags) + names := helper.GetIncludeList(&desired.Spec.Processor.Metrics) + desiredFlowDashboardCM, del, err := buildFlowMetricsDashboard(cmn.Namespace, names) if err != nil { return err } else if err = cmn.ReconcileConfigMap(ctx, desiredFlowDashboardCM, del); err != nil { return err } - desiredHealthDashboardCM, del, err := buildHealthDashboard(cmn.Namespace, desired.Spec.Processor.Metrics.IgnoreTags) + desiredHealthDashboardCM, del, err := buildHealthDashboard(cmn.Namespace, names) if err != nil { return err } else if err = cmn.ReconcileConfigMap(ctx, desiredHealthDashboardCM, del); err != nil { diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 19f5f704d..d593f9ccd 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -81,7 +81,6 @@ func flowCollectorIsoSpecs() { Provided: nil, }, }, - IgnoreTags: []string{}, DisableAlerts: 
[]flowslatest.FLPAlert{}, }, EnableKubeProbes: ptr.To(false), diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 12f16e7e1..6a10a58ee 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -22,6 +22,7 @@ import ( "github.com/netobserv/network-observability-operator/controllers/constants" . "github.com/netobserv/network-observability-operator/controllers/controllerstest" "github.com/netobserv/network-observability-operator/controllers/flowlogspipeline" + "github.com/netobserv/network-observability-operator/pkg/test" ) const ( @@ -117,7 +118,7 @@ func flowCollectorControllerSpecs() { Duration: conntrackTerminatingTimeout, }, Metrics: flowslatest.FLPMetrics{ - IgnoreTags: []string{"flows"}, + IncludeList: &[]string{"node_ingress_bytes_total", "namespace_ingress_bytes_total", "workload_ingress_bytes_total"}, }, }, Agent: flowslatest.FlowCollectorAgent{ @@ -277,7 +278,7 @@ func flowCollectorControllerSpecs() { Duration: conntrackTerminatingTimeout, }, Metrics: flowslatest.FLPMetrics{ - IgnoreTags: []string{"flows", "bytes", "packets"}, + IncludeList: &[]string{"node_ingress_bytes_total"}, DisableAlerts: []flowslatest.FLPAlert{flowslatest.AlertLokiError}, }, } @@ -365,9 +366,16 @@ func flowCollectorControllerSpecs() { }, &cm); err != nil { return err } - return cm.Data["netobserv-health-metrics.json"] - }, timeout, interval).Should(Satisfy(func(json string) bool { - return !strings.Contains(json, "flows") && strings.Contains(json, "Agents") && strings.Contains(json, "Processor") && strings.Contains(json, "Operator") + d, err := test.DashboardFromBytes([]byte(cm.Data["netobserv-health-metrics.json"])) + if err != nil { + return err + } + return d.Titles() + }, timeout, interval).Should(Equal([]string{ + "Flows", + "Agents", + "Processor", + "Operator", })) }) diff --git a/controllers/flowcollector_objects.go b/controllers/flowcollector_objects.go index 
dbfa56435..3075e3bce 100644 --- a/controllers/flowcollector_objects.go +++ b/controllers/flowcollector_objects.go @@ -1,18 +1,13 @@ package controllers import ( - _ "embed" - "github.com/netobserv/network-observability-operator/controllers/constants" - "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/dashboards" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//go:embed infra_health_dashboard.json -var healthDashboardEmbed string - const ( downstreamLabelKey = "openshift.io/cluster-monitoring" downstreamLabelValue = "true" @@ -78,8 +73,8 @@ func buildRoleBindingMonitoringReader(ns string) *rbacv1.ClusterRoleBinding { } } -func buildFlowMetricsDashboard(namespace string, ignoreFlags []string) (*corev1.ConfigMap, bool, error) { - dashboard, err := helper.CreateFlowMetricsDashboard(namespace, ignoreFlags) +func buildFlowMetricsDashboard(namespace string, metrics []string) (*corev1.ConfigMap, bool, error) { + dashboard, err := dashboards.CreateFlowMetricsDashboard(namespace, metrics) if err != nil { return nil, false, err } @@ -99,8 +94,8 @@ func buildFlowMetricsDashboard(namespace string, ignoreFlags []string) (*corev1. 
return &configMap, len(dashboard) == 0, nil } -func buildHealthDashboard(namespace string, ignoreFlags []string) (*corev1.ConfigMap, bool, error) { - dashboard, err := helper.FilterDashboardRows(healthDashboardEmbed, namespace, ignoreFlags) +func buildHealthDashboard(namespace string, metrics []string) (*corev1.ConfigMap, bool, error) { + dashboard, err := dashboards.CreateHealthDashboard(namespace, metrics) if err != nil { return nil, false, err } diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index d89e8ef3c..3216c6f2c 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -1,16 +1,13 @@ package flowlogspipeline import ( - "embed" "encoding/json" "fmt" "hash/fnv" - "path/filepath" "strconv" "time" "github.com/netobserv/flowlogs-pipeline/pkg/api" - "github.com/netobserv/flowlogs-pipeline/pkg/confgen" "github.com/netobserv/flowlogs-pipeline/pkg/config" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promConfig "github.com/prometheus/common/config" @@ -26,6 +23,7 @@ import ( "github.com/netobserv/network-observability-operator/controllers/reconcilers" "github.com/netobserv/network-observability-operator/pkg/filters" "github.com/netobserv/network-observability-operator/pkg/helper" + "github.com/netobserv/network-observability-operator/pkg/metrics" "github.com/netobserv/network-observability-operator/pkg/volumes" ) @@ -33,7 +31,6 @@ const ( configVolume = "config-volume" configPath = "/etc/flowlogs-pipeline" configFile = "config.json" - metricsConfigDir = "metrics_definitions" lokiToken = "loki-token" healthServiceName = "health" prometheusServiceName = "prometheus" @@ -238,49 +235,6 @@ func (b *builder) podTemplate(hasHostPort, hostNetwork bool, annotations map[str } } -//go:embed metrics_definitions -var metricsConfigEmbed embed.FS - -// obtainMetricsConfiguration returns the configuration 
info for the prometheus stage needed to -// supply the metrics for those metrics -func (b *builder) obtainMetricsConfiguration() (api.PromMetricsItems, error) { - entries, err := metricsConfigEmbed.ReadDir(metricsConfigDir) - if err != nil { - return nil, fmt.Errorf("failed to access metrics_definitions directory: %w", err) - } - - cg := confgen.NewConfGen(&confgen.Options{ - GenerateStages: []string{"encode_prom"}, - SkipWithTags: b.desired.Processor.Metrics.IgnoreTags, - }) - - for _, entry := range entries { - fileName := entry.Name() - if fileName == "config.yaml" { - continue - } - srcPath := filepath.Join(metricsConfigDir, fileName) - - input, err := metricsConfigEmbed.ReadFile(srcPath) - if err != nil { - return nil, fmt.Errorf("error reading metrics file %s; %w", srcPath, err) - } - err = cg.ParseDefinition(fileName, input) - if err != nil { - return nil, fmt.Errorf("error parsing metrics file %s; %w", srcPath, err) - } - } - - stages := cg.GenerateTruncatedConfig() - if len(stages) != 1 { - return nil, fmt.Errorf("error generating truncated config, 1 stage expected in %v", stages) - } - if stages[0].Encode == nil || stages[0].Encode.Prom == nil { - return nil, fmt.Errorf("error generating truncated config, Encode expected in %v", stages) - } - return stages[0].Encode.Prom.Metrics, nil -} - func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { lastStage := *stage indexFields := constants.LokiIndexFields @@ -372,10 +326,8 @@ func (b *builder) addTransformStages(stage *config.PipelineBuilderStage) error { } // obtain encode_prometheus stage from metrics_definitions - promMetrics, err := b.obtainMetricsConfiguration() - if err != nil { - return err - } + names := helper.GetIncludeList(&b.desired.Processor.Metrics) + promMetrics := metrics.GetDefinitions(names) if len(promMetrics) > 0 { // prometheus stage (encode) configuration diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 
c51edbf76..04981295e 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -19,8 +19,10 @@ package flowlogspipeline import ( "encoding/json" "fmt" + "sort" "testing" + "github.com/netobserv/flowlogs-pipeline/pkg/api" "github.com/netobserv/flowlogs-pipeline/pkg/config" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" @@ -892,7 +894,16 @@ func TestPipelineTraceStage(t *testing.T) { assert.Equal(`[{"name":"ipfix"},{"name":"extract_conntrack","follows":"ipfix"},{"name":"enrich","follows":"extract_conntrack"},{"name":"loki","follows":"enrich"},{"name":"stdout","follows":"enrich"},{"name":"prometheus","follows":"enrich"}]`, string(jsonStages)) } -func TestMergeMetricsConfigurationNoIgnore(t *testing.T) { +func getSortedMetricsNames(m []api.PromMetricsItem) []string { + ret := []string{} + for i := range m { + ret = append(ret, m[i].Name) + } + sort.Strings(ret) + return ret +} + +func TestMergeMetricsConfiguration_Default(t *testing.T) { assert := assert.New(t) cfg := getConfig() @@ -903,30 +914,19 @@ func TestMergeMetricsConfigurationNoIgnore(t *testing.T) { assert.True(validatePipelineConfig(stages, parameters)) jsonStages, _ := json.Marshal(stages) assert.Equal(`[{"name":"ipfix"},{"name":"extract_conntrack","follows":"ipfix"},{"name":"enrich","follows":"extract_conntrack"},{"name":"loki","follows":"enrich"},{"name":"stdout","follows":"enrich"},{"name":"prometheus","follows":"enrich"}]`, string(jsonStages)) - assert.Len(parameters[5].Encode.Prom.Metrics, 15) - assert.Equal("namespace_egress_bytes_total", parameters[5].Encode.Prom.Metrics[0].Name) - assert.Equal("namespace_egress_packets_total", parameters[5].Encode.Prom.Metrics[1].Name) - assert.Equal("namespace_flows_total", parameters[5].Encode.Prom.Metrics[2].Name) - assert.Equal("namespace_ingress_bytes_total", parameters[5].Encode.Prom.Metrics[3].Name) - assert.Equal("namespace_ingress_packets_total", parameters[5].Encode.Prom.Metrics[4].Name) - 
assert.Equal("node_egress_bytes_total", parameters[5].Encode.Prom.Metrics[5].Name) - assert.Equal("node_egress_packets_total", parameters[5].Encode.Prom.Metrics[6].Name) - assert.Equal("node_flows_total", parameters[5].Encode.Prom.Metrics[7].Name) - assert.Equal("node_ingress_bytes_total", parameters[5].Encode.Prom.Metrics[8].Name) - assert.Equal("node_ingress_packets_total", parameters[5].Encode.Prom.Metrics[9].Name) - assert.Equal("workload_egress_bytes_total", parameters[5].Encode.Prom.Metrics[10].Name) - assert.Equal("workload_egress_packets_total", parameters[5].Encode.Prom.Metrics[11].Name) - assert.Equal("workload_flows_total", parameters[5].Encode.Prom.Metrics[12].Name) - assert.Equal("workload_ingress_bytes_total", parameters[5].Encode.Prom.Metrics[13].Name) - assert.Equal("workload_ingress_packets_total", parameters[5].Encode.Prom.Metrics[14].Name) + names := getSortedMetricsNames(parameters[5].Encode.Prom.Metrics) + assert.Len(names, 3) + assert.Equal("namespace_flows_total", names[0]) + assert.Equal("node_ingress_bytes_total", names[1]) + assert.Equal("workload_ingress_bytes_total", names[2]) assert.Equal("netobserv_", parameters[5].Encode.Prom.Prefix) } -func TestMergeMetricsConfigurationWithIgnore(t *testing.T) { +func TestMergeMetricsConfiguration_WithList(t *testing.T) { assert := assert.New(t) cfg := getConfig() - cfg.Processor.Metrics.IgnoreTags = []string{"nodes"} + cfg.Processor.Metrics.IncludeList = &[]string{"namespace_egress_bytes_total", "namespace_ingress_bytes_total"} b := monoBuilder("namespace", &cfg) stages, parameters, err := b.buildPipelineConfig() @@ -934,16 +934,18 @@ func TestMergeMetricsConfigurationWithIgnore(t *testing.T) { assert.True(validatePipelineConfig(stages, parameters)) jsonStages, _ := json.Marshal(stages) 
assert.Equal(`[{"name":"ipfix"},{"name":"extract_conntrack","follows":"ipfix"},{"name":"enrich","follows":"extract_conntrack"},{"name":"loki","follows":"enrich"},{"name":"stdout","follows":"enrich"},{"name":"prometheus","follows":"enrich"}]`, string(jsonStages)) - assert.Len(parameters[5].Encode.Prom.Metrics, 10) - assert.Equal("namespace_egress_bytes_total", parameters[5].Encode.Prom.Metrics[0].Name) + names := getSortedMetricsNames(parameters[5].Encode.Prom.Metrics) + assert.Len(names, 2) + assert.Equal("namespace_egress_bytes_total", names[0]) + assert.Equal("namespace_ingress_bytes_total", names[1]) assert.Equal("netobserv_", parameters[5].Encode.Prom.Prefix) } -func TestMergeMetricsConfigurationIgnoreAll(t *testing.T) { +func TestMergeMetricsConfiguration_EmptyList(t *testing.T) { assert := assert.New(t) cfg := getConfig() - cfg.Processor.Metrics.IgnoreTags = []string{"nodes", "namespaces", "workloads"} + cfg.Processor.Metrics.IncludeList = &[]string{} b := monoBuilder("namespace", &cfg) stages, parameters, err := b.buildPipelineConfig() diff --git a/controllers/flowlogspipeline/metrics_definitions/namespace_egress_bytes_total.yaml b/controllers/flowlogspipeline/metrics_definitions/namespace_egress_bytes_total.yaml deleted file mode 100644 index c86e208dc..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/namespace_egress_bytes_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the namespaces outgoing traffic -details: - Sum bytes for outgoing traffic per source and destination namespaces -usage: - Evaluate network outgoing usage breakdown per source and destination namespaces -tags: - - egress - - bytes - - namespaces -encode: - type: prom - prom: - metrics: - - name: namespace_egress_bytes_total - type: counter - valuekey: Bytes - filters: - - key: FlowDirection - value: "1" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace diff --git 
a/controllers/flowlogspipeline/metrics_definitions/namespace_egress_packets_total.yaml b/controllers/flowlogspipeline/metrics_definitions/namespace_egress_packets_total.yaml deleted file mode 100644 index 292a349fe..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/namespace_egress_packets_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the namespaces outgoing traffic -details: - Sum packets for outgoing traffic per source and destination namespaces -usage: - Evaluate network outgoing usage breakdown per source and destination namespaces -tags: - - egress - - packets - - namespaces -encode: - type: prom - prom: - metrics: - - name: namespace_egress_packets_total - type: counter - valuekey: Packets - filters: - - key: FlowDirection - value: "1" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace diff --git a/controllers/flowlogspipeline/metrics_definitions/namespace_flows_total.yaml b/controllers/flowlogspipeline/metrics_definitions/namespace_flows_total.yaml deleted file mode 100644 index 70bd218f4..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/namespace_flows_total.yaml +++ /dev/null @@ -1,20 +0,0 @@ -#flp_confgen -description: - This metric counts flows per namespace -details: - Counting flows per source and destination namespaces -usage: - Evaluate number of flows per source and destination namespaces -tags: - - flows - - namespaces - - namespaces-flows -encode: - type: prom - prom: - metrics: - - name: namespace_flows_total - type: counter - labels: - - SrcK8S_Namespace - - DstK8S_Namespace diff --git a/controllers/flowlogspipeline/metrics_definitions/namespace_ingress_bytes_total.yaml b/controllers/flowlogspipeline/metrics_definitions/namespace_ingress_bytes_total.yaml deleted file mode 100644 index 82332300f..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/namespace_ingress_bytes_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ 
-#flp_confgen -description: - This metric observes the namespaces incoming traffic -details: - Sum bytes for incoming traffic per source and destination namespaces -usage: - Evaluate network incoming usage breakdown per source and destination namespaces -tags: - - ingress - - bytes - - namespaces -encode: - type: prom - prom: - metrics: - - name: namespace_ingress_bytes_total - type: counter - valuekey: Bytes - filters: - - key: FlowDirection - value: "0" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace diff --git a/controllers/flowlogspipeline/metrics_definitions/namespace_ingress_packets_total.yaml b/controllers/flowlogspipeline/metrics_definitions/namespace_ingress_packets_total.yaml deleted file mode 100644 index 4b9c0bcd3..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/namespace_ingress_packets_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the namespaces incoming traffic -details: - Sum packets for incoming traffic per source and destination namespaces -usage: - Evaluate network incoming usage breakdown per source and destination namespaces -tags: - - ingress - - packets - - namespaces -encode: - type: prom - prom: - metrics: - - name: namespace_ingress_packets_total - type: counter - valuekey: Packets - filters: - - key: FlowDirection - value: "0" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace diff --git a/controllers/flowlogspipeline/metrics_definitions/node_egress_bytes_total.yaml b/controllers/flowlogspipeline/metrics_definitions/node_egress_bytes_total.yaml deleted file mode 100644 index 99f3a8164..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/node_egress_bytes_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the nodes outgoing traffic -details: - Sum bytes for outgoing traffic per source and destination nodes -usage: - Evaluate network outgoing usage 
breakdown per source and destination nodes -tags: - - egress - - bytes - - nodes -encode: - type: prom - prom: - metrics: - - name: node_egress_bytes_total - type: counter - valuekey: Bytes - filters: - - key: FlowDirection - value: "1" - - key: Duplicate - value: "false" - labels: - - SrcK8S_HostName - - DstK8S_HostName diff --git a/controllers/flowlogspipeline/metrics_definitions/node_egress_packets_total.yaml b/controllers/flowlogspipeline/metrics_definitions/node_egress_packets_total.yaml deleted file mode 100644 index 488b0850d..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/node_egress_packets_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the nodes outgoing traffic -details: - Sum packets for outgoing traffic per source and destination nodes -usage: - Evaluate network outgoing usage breakdown per source and destination nodes -tags: - - egress - - packets - - nodes -encode: - type: prom - prom: - metrics: - - name: node_egress_packets_total - type: counter - valuekey: Packets - filters: - - key: FlowDirection - value: "1" - - key: Duplicate - value: "false" - labels: - - SrcK8S_HostName - - DstK8S_HostName diff --git a/controllers/flowlogspipeline/metrics_definitions/node_flows_total.yaml b/controllers/flowlogspipeline/metrics_definitions/node_flows_total.yaml deleted file mode 100644 index f130459dd..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/node_flows_total.yaml +++ /dev/null @@ -1,20 +0,0 @@ -#flp_confgen -description: - This metric counts flows per nodes -details: - Counting flows per source and destination nodes -usage: - Evaluate number of flows per source and destination nodes -tags: - - flows - - nodes - - nodes-flows -encode: - type: prom - prom: - metrics: - - name: node_flows_total - type: counter - labels: - - SrcK8S_HostName - - DstK8S_HostName diff --git a/controllers/flowlogspipeline/metrics_definitions/node_ingress_bytes_total.yaml 
b/controllers/flowlogspipeline/metrics_definitions/node_ingress_bytes_total.yaml deleted file mode 100644 index 14d704030..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/node_ingress_bytes_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the nodes incoming traffic -details: - Sum bytes for incoming traffic per source and destination nodes -usage: - Evaluate network incoming usage breakdown per source and destination nodes -tags: - - ingress - - bytes - - nodes -encode: - type: prom - prom: - metrics: - - name: node_ingress_bytes_total - type: counter - valuekey: Bytes - filters: - - key: FlowDirection - value: "0" - - key: Duplicate - value: "false" - labels: - - SrcK8S_HostName - - DstK8S_HostName diff --git a/controllers/flowlogspipeline/metrics_definitions/node_ingress_packets_total.yaml b/controllers/flowlogspipeline/metrics_definitions/node_ingress_packets_total.yaml deleted file mode 100644 index 303b8d7c2..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/node_ingress_packets_total.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#flp_confgen -description: - This metric observes the nodes incoming traffic -details: - Sum packets for incoming traffic per source and destination nodes -usage: - Evaluate network incoming usage breakdown per source and destination nodes -tags: - - ingress - - packets - - nodes -encode: - type: prom - prom: - metrics: - - name: node_ingress_packets_total - type: counter - valuekey: Packets - filters: - - key: FlowDirection - value: "0" - - key: Duplicate - value: "false" - labels: - - SrcK8S_HostName - - DstK8S_HostName diff --git a/controllers/flowlogspipeline/metrics_definitions/workload_egress_bytes_total.yaml b/controllers/flowlogspipeline/metrics_definitions/workload_egress_bytes_total.yaml deleted file mode 100644 index ee57a10c0..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/workload_egress_bytes_total.yaml +++ /dev/null @@ -1,30 +0,0 @@ 
-#flp_confgen -description: - This metric observes the outgoing traffic -details: - Sum bytes for outgoing traffic per source and destination namespaces and owners -usage: - Evaluate network outgoing usage breakdown per source and destination namespaces and owners -tags: - - egress - - bytes - - workloads -encode: - type: prom - prom: - metrics: - - name: workload_egress_bytes_total - type: counter - valuekey: Bytes - filters: - - key: FlowDirection - value: "1" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace - - SrcK8S_OwnerName - - DstK8S_OwnerName - - SrcK8S_OwnerType - - DstK8S_OwnerType diff --git a/controllers/flowlogspipeline/metrics_definitions/workload_egress_packets_total.yaml b/controllers/flowlogspipeline/metrics_definitions/workload_egress_packets_total.yaml deleted file mode 100644 index 3caa7de0f..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/workload_egress_packets_total.yaml +++ /dev/null @@ -1,30 +0,0 @@ -#flp_confgen -description: - This metric observes the outgoing traffic -details: - Sum packet number for outgoing traffic per source and destination namespaces and owners -usage: - Evaluate network incoming usage breakdown per source and destination namespaces and owners -tags: - - egress - - packets - - workloads -encode: - type: prom - prom: - metrics: - - name: workload_egress_packets_total - type: counter - valuekey: Packets - filters: - - key: FlowDirection - value: "1" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace - - SrcK8S_OwnerName - - DstK8S_OwnerName - - SrcK8S_OwnerType - - DstK8S_OwnerType diff --git a/controllers/flowlogspipeline/metrics_definitions/workload_flows_total.yaml b/controllers/flowlogspipeline/metrics_definitions/workload_flows_total.yaml deleted file mode 100644 index c722cff4d..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/workload_flows_total.yaml +++ /dev/null @@ -1,24 +0,0 @@ -#flp_confgen 
-description: - This metric counts flows per workloads -details: - Counting flows per source and destination workloads -usage: - Evaluate number of flows per source and destination workloads -tags: - - flows - - workloads - - workloads-flows -encode: - type: prom - prom: - metrics: - - name: workload_flows_total - type: counter - labels: - - SrcK8S_Namespace - - DstK8S_Namespace - - SrcK8S_OwnerName - - DstK8S_OwnerName - - SrcK8S_OwnerType - - DstK8S_OwnerType diff --git a/controllers/flowlogspipeline/metrics_definitions/workload_ingress_bytes_total.yaml b/controllers/flowlogspipeline/metrics_definitions/workload_ingress_bytes_total.yaml deleted file mode 100644 index e372f7204..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/workload_ingress_bytes_total.yaml +++ /dev/null @@ -1,30 +0,0 @@ -#flp_confgen -description: - This metric observes the incoming traffic -details: - Sum bytes for incoming traffic per source and destination namespaces and owners -usage: - Evaluate network incoming usage breakdown per source and destination namespaces and owners -tags: - - ingress - - bytes - - workloads -encode: - type: prom - prom: - metrics: - - name: workload_ingress_bytes_total - type: counter - valuekey: Bytes - filters: - - key: FlowDirection - value: "0" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace - - SrcK8S_OwnerName - - DstK8S_OwnerName - - SrcK8S_OwnerType - - DstK8S_OwnerType diff --git a/controllers/flowlogspipeline/metrics_definitions/workload_ingress_packets_total.yaml b/controllers/flowlogspipeline/metrics_definitions/workload_ingress_packets_total.yaml deleted file mode 100644 index 8e122f2e2..000000000 --- a/controllers/flowlogspipeline/metrics_definitions/workload_ingress_packets_total.yaml +++ /dev/null @@ -1,30 +0,0 @@ -#flp_confgen -description: - This metric observes the incoming traffic -details: - Sum packet number for incoming traffic per source and destination namespaces and owners -usage: 
- Evaluate network incoming usage breakdown per source and destination namespaces and owners -tags: - - ingress - - packets - - workloads -encode: - type: prom - prom: - metrics: - - name: workload_ingress_packets_total - type: counter - valuekey: Packets - filters: - - key: FlowDirection - value: "0" - - key: Duplicate - value: "false" - labels: - - SrcK8S_Namespace - - DstK8S_Namespace - - SrcK8S_OwnerName - - DstK8S_OwnerName - - SrcK8S_OwnerType - - DstK8S_OwnerType diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 06a2fb0ae..85f636ed9 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -8547,11 +8547,18 @@ target specifies the target value for the given metric ignoreTags []string - `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).
+ `ignoreTags` [deprecated (*)] is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).
Deprecation notice: use `includeList` instead.

Default: [egress packets nodes-flows namespaces-flows workloads-flows namespaces]
false + + includeList + []string + + `includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.
+ + false server object @@ -13705,12 +13712,10 @@ target specifies the target value for the given metric false - ignoreTags + includeList []string - `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).
-
- Default: [egress packets nodes-flows namespaces-flows workloads-flows namespaces]
+ `includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.
false diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index 202789ec0..d4bea90db 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -3334,7 +3334,12 @@ spec: - namespaces-flows - workloads-flows - namespaces - description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).' + description: '`ignoreTags` [deprecated (*)] is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).
Deprecation notice: use `includeList` instead.' + items: + type: string + type: array + includeList: + description: '`includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.' items: type: string type: array @@ -5313,15 +5318,8 @@ spec: - NetObservLokiError type: string type: array - ignoreTags: - default: - - egress - - packets - - nodes-flows - - namespaces-flows - - workloads-flows - - namespaces - description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).' + includeList: + description: '`includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. 
Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.' items: type: string type: array diff --git a/pkg/helper/dashboard.go b/pkg/dashboards/dashboard.go similarity index 79% rename from pkg/helper/dashboard.go rename to pkg/dashboards/dashboard.go index b5e708a83..6f52c7559 100644 --- a/pkg/helper/dashboard.go +++ b/pkg/dashboards/dashboard.go @@ -1,7 +1,6 @@ -package helper +package dashboards import ( - "encoding/json" "fmt" "strings" @@ -238,26 +237,21 @@ func flowMetricsRow(netobsNs string, rowInfo rowInfo) string { `, panels, title) } -func CreateFlowMetricsDashboard(netobsNs string, ignoreFlags []string) (string, error) { +func CreateFlowMetricsDashboard(netobsNs string, metrics []string) (string, error) { var rows []string - ignoreNamespace := slices.Contains(ignoreFlags, metricTagNamespaces) for _, ri := range rowsInfo { - groupToCheck := ri.group - - // Replace *_namespace_* with *_workload_* metrics when relevant, as they can show the same information - if ignoreNamespace && strings.Contains(ri.metric, "_namespace_") { - // ri is a copy, safe to change - ri.metric = strings.Replace(ri.metric, "_namespace_", "_workload_", 1) - groupToCheck = metricTagWorkloads - } - - if !slices.Contains(ignoreFlags, ri.dir) && - !slices.Contains(ignoreFlags, groupToCheck) && - !slices.Contains(ignoreFlags, ri.valueType) && - !slices.Contains(ignoreFlags, ri.metric) { - + trimmed := strings.TrimPrefix(ri.metric, "netobserv_") + if slices.Contains(metrics, trimmed) { rows = append(rows, flowMetricsRow(netobsNs, ri)) + } else if strings.Contains(ri.metric, 
"_namespace_") { + // namespace-based panels can also be displayed using workload-based metrics + // Try again, replacing *_namespace_* with *_workload_* + ri.metric = strings.Replace(ri.metric, "_namespace_", "_workload_", 1) + trimmed = strings.TrimPrefix(ri.metric, "netobserv_") + if slices.Contains(metrics, trimmed) { + rows = append(rows, flowMetricsRow(netobsNs, ri)) + } } } @@ -324,47 +318,3 @@ func CreateFlowMetricsDashboard(netobsNs string, ignoreFlags []string) (string, } `, rowsStr), nil } - -func FilterDashboardRows(dashboard string, netobsNs string, ignoreFlags []string) (string, error) { - var result map[string]any - err := json.Unmarshal([]byte(dashboard), &result) - if err != nil { - return "", err - } - - // return dashboard as is if not containing rows - if result["rows"] == nil { - return dashboard, nil - } - - rows := result["rows"].([]any) - filteredRows := []map[string]any{} - for _, r := range rows { - row := r.(map[string]any) - - if row["tags"] != nil { - t := row["tags"].([]any) - tags := make([]string, len(t)) - for i := range t { - tags[i] = t[i].(string) - } - - // add any row that has tags not included in ignored flags - if !Intersect(tags, ignoreFlags) { - filteredRows = append(filteredRows, row) - } - } else { - // add any row that doesn't have tags - filteredRows = append(filteredRows, row) - } - } - - // return empty if dashboard doesn't contains rows anymore - if len(filteredRows) == 0 { - return "", nil - } - - result["rows"] = filteredRows - bytes, err := json.Marshal(result) - return strings.ReplaceAll(string(bytes), "$NETOBSERV_NS", netobsNs), err -} diff --git a/pkg/helper/dashboard_test.go b/pkg/dashboards/dashboard_test.go similarity index 78% rename from pkg/helper/dashboard_test.go rename to pkg/dashboards/dashboard_test.go index f8e1fd683..01430c265 100644 --- a/pkg/helper/dashboard_test.go +++ b/pkg/dashboards/dashboard_test.go @@ -1,34 +1,20 @@ -package helper +package dashboards import ( - "encoding/json" "testing" + 
"github.com/netobserv/network-observability-operator/pkg/metrics" + "github.com/netobserv/network-observability-operator/pkg/test" "github.com/stretchr/testify/assert" ) -type dashboard struct { - Rows []struct { - Panels []struct { - Targets []struct { - Expr string `json:"expr"` - LegendFormat string `json:"legendFormat"` - } `json:"targets"` - Title string `json:"title"` - } `json:"panels"` - Title string `json:"title"` - } `json:"rows"` - Title string `json:"title"` -} - func TestCreateFlowMetricsDashboard_All(t *testing.T) { assert := assert.New(t) - js, err := CreateFlowMetricsDashboard("netobserv", []string{}) + js, err := CreateFlowMetricsDashboard("netobserv", metrics.GetAllNames()) assert.NoError(err) - var d dashboard - err = json.Unmarshal([]byte(js), &d) + d, err := test.DashboardFromBytes([]byte(js)) assert.NoError(err) assert.Equal("NetObserv", d.Title) @@ -74,39 +60,10 @@ func TestCreateFlowMetricsDashboard_All(t *testing.T) { func TestCreateFlowMetricsDashboard_OnlyNodeIngressBytes(t *testing.T) { assert := assert.New(t) - js, err := CreateFlowMetricsDashboard("netobserv", []string{metricTagNamespaces, metricTagWorkloads, metricTagEgress, metricTagPackets}) - assert.NoError(err) - - var d dashboard - err = json.Unmarshal([]byte(js), &d) - assert.NoError(err) - - assert.Equal("NetObserv", d.Title) - assert.Len(d.Rows, 1) - - // First row - row := 0 - assert.Equal("Top byte rates received per source and destination nodes", d.Rows[row].Title) - assert.Len(d.Rows[row].Panels, 1) - assert.Equal("", d.Rows[row].Panels[0].Title) - assert.Len(d.Rows[row].Panels[0].Targets, 1) - assert.Contains(d.Rows[row].Panels[0].Targets[0].Expr, "label_replace(label_replace(topk(10,sum(rate(netobserv_node_ingress_bytes_total[1m])) by (SrcK8S_HostName, DstK8S_HostName))") -} - -func TestCreateFlowMetricsDashboard_RemoveByMetricName(t *testing.T) { - assert := assert.New(t) - - js, err := CreateFlowMetricsDashboard("netobserv", []string{ - metricTagNamespaces, - 
metricTagWorkloads, - "netobserv_node_egress_packets_total", - "netobserv_node_ingress_packets_total", - "netobserv_node_egress_bytes_total", - }) + js, err := CreateFlowMetricsDashboard("netobserv", []string{"node_ingress_bytes_total"}) assert.NoError(err) - var d dashboard - err = json.Unmarshal([]byte(js), &d) + d, err := test.DashboardFromBytes([]byte(js)) assert.NoError(err) assert.Equal("NetObserv", d.Title) @@ -121,14 +78,13 @@ func TestCreateFlowMetricsDashboard_RemoveByMetricName(t *testing.T) { assert.Contains(d.Rows[row].Panels[0].Targets[0].Expr, "label_replace(label_replace(topk(10,sum(rate(netobserv_node_ingress_bytes_total[1m])) by (SrcK8S_HostName, DstK8S_HostName))") } -func TestCreateFlowMetricsDashboard_DefaultIgnoreTags(t *testing.T) { +func TestCreateFlowMetricsDashboard_DefaultList(t *testing.T) { assert := assert.New(t) - js, err := CreateFlowMetricsDashboard("netobserv", []string{"egress", "packets", "namespaces"}) + js, err := CreateFlowMetricsDashboard("netobserv", metrics.DefaultIncludeList) assert.NoError(err) - var d dashboard - err = json.Unmarshal([]byte(js), &d) + d, err := test.DashboardFromBytes([]byte(js)) assert.NoError(err) assert.Equal("NetObserv", d.Title) @@ -171,3 +127,56 @@ func TestCreateFlowMetricsDashboard_DefaultIgnoreTags(t *testing.T) { `label_replace(label_replace(topk(10,sum(rate(netobserv_workload_ingress_bytes_total{SrcK8S_Namespace=~"netobserv|openshift.*"}[1m]) or rate(netobserv_workload_ingress_bytes_total{SrcK8S_Namespace!~"netobserv|openshift.*",DstK8S_Namespace=~"netobserv|openshift.*"}[1m])) by (SrcK8S_Namespace, SrcK8S_OwnerName, DstK8S_Namespace, DstK8S_OwnerName))`, ) } + +func TestCreateHealthDashboard_Default(t *testing.T) { + assert := assert.New(t) + + js, err := CreateHealthDashboard("netobserv", metrics.DefaultIncludeList) + assert.NoError(err) + + d, err := test.DashboardFromBytes([]byte(js)) + assert.NoError(err) + + assert.Equal("NetObserv / Health", d.Title) + assert.Equal([]string{ + "Flows", 
+ "Flows Overhead", + "Top flow rates per source and destination namespaces", + "Agents", + "Processor", + "Operator", + }, d.Titles()) + + // First row + row := 0 + assert.Len(d.Rows[row].Panels, 1) + assert.Equal("Rates", d.Rows[row].Panels[0].Title) + assert.Len(d.Rows[row].Panels[0].Targets, 3) + assert.Contains(d.Rows[row].Panels[0].Targets[0].Expr, "netobserv_ingest_flows_processed") + + // 3rd row + row = 2 + assert.Len(d.Rows[row].Panels, 2) + assert.Equal("Applications", d.Rows[row].Panels[0].Title) + assert.Equal("Infrastructure", d.Rows[row].Panels[1].Title) + assert.Len(d.Rows[row].Panels[0].Targets, 1) + assert.Contains(d.Rows[row].Panels[0].Targets[0].Expr, "netobserv_namespace_flows_total") +} + +func TestCreateHealthDashboard_NoFlowMetric(t *testing.T) { + assert := assert.New(t) + + js, err := CreateHealthDashboard("netobserv", []string{}) + assert.NoError(err) + + d, err := test.DashboardFromBytes([]byte(js)) + assert.NoError(err) + + assert.Equal("NetObserv / Health", d.Title) + assert.Equal([]string{ + "Flows", + "Agents", + "Processor", + "Operator", + }, d.Titles()) +} diff --git a/pkg/dashboards/health.go b/pkg/dashboards/health.go new file mode 100644 index 000000000..cdd74100a --- /dev/null +++ b/pkg/dashboards/health.go @@ -0,0 +1,77 @@ +package dashboards + +import ( + _ "embed" + "encoding/json" + "strings" +) + +//go:embed infra_health_dashboard.json +var healthDashboardEmbed string + +func CreateHealthDashboard(netobsNs string, metrics []string) (string, error) { + var result map[string]any + err := json.Unmarshal([]byte(healthDashboardEmbed), &result) + if err != nil { + return "", err + } + + // return dashboard as is if not containing rows + if result["rows"] == nil { + return healthDashboardEmbed, nil + } + + rows := result["rows"].([]any) + filteredRows := []map[string]any{} + for _, r := range rows { + row := r.(map[string]any) + + if isRowPresent(row, metrics) { + filteredRows = append(filteredRows, row) + } + } + + // return 
empty if dashboard doesn't contain rows anymore + if len(filteredRows) == 0 { + return "", nil + } + + result["rows"] = filteredRows + bytes, err := json.Marshal(result) + return strings.ReplaceAll(string(bytes), "$NETOBSERV_NS", netobsNs), err +} + +func hasTag(item map[string]any, search string) bool { + if item["tags"] == nil { + return false + } + tags := item["tags"].([]any) + for _, t := range tags { + tag := t.(string) + if tag == search { + return true + } + } + return false +} + +func isRowPresent(row map[string]any, metrics []string) bool { + if !hasTag(row, "dynamic") { + return true + } + panels := row["panels"].([]any) + for _, p := range panels { + panel := p.(map[string]any) + targets := panel["targets"].([]any) + for _, t := range targets { + target := t.(map[string]any) + expr := target["expr"].(string) + for _, metric := range metrics { + if strings.Contains(expr, metric) { + return true + } + } + } + } + return false +} diff --git a/controllers/infra_health_dashboard.json b/pkg/dashboards/infra_health_dashboard.json similarity index 98% rename from controllers/infra_health_dashboard.json rename to pkg/dashboards/infra_health_dashboard.json index 206d669eb..970beeecd 100644 --- a/controllers/infra_health_dashboard.json +++ b/pkg/dashboards/infra_health_dashboard.json @@ -121,9 +121,6 @@ "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "tags": [ - "flows" - ], "title": "Flows", "titleSize": "h6" }, @@ -222,9 +219,7 @@ "repeatRowId": null, "showTitle": true, "tags": [ - "flows", - "overhead", - "flows-overhead" + "dynamic" ], "title": "Flows Overhead", "titleSize": "h6" @@ -323,11 +318,9 @@ ], "showTitle": true, "tags": [ - "flows", - "nodes", - "nodes-flows" + "dynamic" ], - "title": "Top flow rates per source and destination nodes (1-min rates)" + "title": "Top flow rates per source and destination nodes" }, { "collapse": false, @@ -509,11 +502,9 @@ ], "showTitle": true, "tags": [ - "flows", - "namespaces", - "namespaces-flows" 
+ "dynamic" ], - "title": "Top flow rates per source and destination namespaces (1-min rates)", + "title": "Top flow rates per source and destination namespaces", "titleSize": "h6" }, { @@ -694,11 +685,9 @@ ], "showTitle": true, "tags": [ - "flows", - "workloads", - "workloads-flows" + "dynamic" ], - "title": "Top flow rates per source and destination workloads (1-min rates)" + "title": "Top flow rates per source and destination workloads" }, { "collapse": false, diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index 31bff2f55..0ddf85561 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -5,6 +5,7 @@ import ( flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" + "github.com/netobserv/network-observability-operator/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -135,3 +136,10 @@ func IsOwned(obj client.Object) bool { refs := obj.GetOwnerReferences() return len(refs) > 0 && strings.HasPrefix(refs[0].APIVersion, flowslatest.GroupVersion.Group) } + +func GetIncludeList(spec *flowslatest.FLPMetrics) []string { + if spec.IncludeList == nil { + return metrics.DefaultIncludeList + } + return *spec.IncludeList +} diff --git a/pkg/metrics/predefined_metrics.go b/pkg/metrics/predefined_metrics.go new file mode 100644 index 000000000..0944d91d1 --- /dev/null +++ b/pkg/metrics/predefined_metrics.go @@ -0,0 +1,133 @@ +package metrics + +import ( + "fmt" + "reflect" + "strings" + + flpapi "github.com/netobserv/flowlogs-pipeline/pkg/api" +) + +const ( + tagNamespaces = "namespaces" + tagNodes = "nodes" + tagWorkloads = "workloads" + tagIngress = "ingress" + tagEgress = "egress" + tagBytes = "bytes" + tagPackets = "packets" +) + +var ( + mapLabels = map[string][]string{ + tagNodes: {"SrcK8S_HostName", "DstK8S_HostName"}, + tagNamespaces: {"SrcK8S_Namespace", "DstK8S_Namespace"}, + tagWorkloads: {"SrcK8S_Namespace", 
"DstK8S_Namespace", "SrcK8S_OwnerName", "DstK8S_OwnerName", "SrcK8S_OwnerType", "DstK8S_OwnerType"}, + } + mapValueFields = map[string]string{ + tagBytes: "Bytes", + tagPackets: "Packets", + } + mapDirection = map[string]string{ + tagIngress: "0|2", + tagEgress: "1|2", + } + predefinedMetrics []taggedMetricDefinition + // Note that we set default in-code rather than in CRD, in order to keep track of value being unset or set intentionnally in FlowCollector + DefaultIncludeList = []string{"node_ingress_bytes_total", "workload_ingress_bytes_total", "namespace_flows_total"} + // Pre-deprecation default IgnoreTags list (1.4) - used before switching to whitelist approach, + // to make sure there is no unintended new metrics being collected + // Don't add anything here: this is not meant to evolve + defaultIgnoreTags1_4 = []string{"egress", "packets", "nodes-flows", "namespaces-flows", "workloads-flows", "namespaces"} +) + +type taggedMetricDefinition struct { + flpapi.PromMetricsItem + tags []string +} + +func init() { + for _, group := range []string{tagNodes, tagNamespaces, tagWorkloads} { + groupTrimmed := strings.TrimSuffix(group, "s") + labels := mapLabels[group] + // Bytes / packets metrics + for _, vt := range []string{tagBytes, tagPackets} { + valueField := mapValueFields[vt] + for _, dir := range []string{tagEgress, tagIngress} { + predefinedMetrics = append(predefinedMetrics, taggedMetricDefinition{ + PromMetricsItem: flpapi.PromMetricsItem{ + Name: fmt.Sprintf("%s_%s_%s_total", groupTrimmed, dir, vt), + Type: "counter", + ValueKey: valueField, + Filters: []flpapi.PromMetricsFilter{ + {Key: "Duplicate", Value: "false"}, + {Key: "FlowDirection", Value: mapDirection[dir]}, + }, + Labels: labels, + }, + tags: []string{group, vt, dir}, + }) + } + } + // Flows metrics + predefinedMetrics = append(predefinedMetrics, taggedMetricDefinition{ + PromMetricsItem: flpapi.PromMetricsItem{ + Name: fmt.Sprintf("%s_flows_total", groupTrimmed), + Type: "counter", + Labels: 
labels, + }, + tags: []string{group, group + "-flows", "flows"}, + }) + } +} + +func isIgnored(def *taggedMetricDefinition, ignoreTags []string) bool { + for _, ignoreTag := range ignoreTags { + for _, tag := range def.tags { + if ignoreTag == tag { + return true + } + } + } + return false +} + +func convertIgnoreTagsToIncludeList(ignoreTags []string) []string { + ret := []string{} + for i := range predefinedMetrics { + if !isIgnored(&predefinedMetrics[i], ignoreTags) { + ret = append(ret, predefinedMetrics[i].Name) + } + } + return ret +} + +func GetEnabledNames(ignoreTags []string, includeList *[]string) []string { + if includeList == nil { + if reflect.DeepEqual(ignoreTags, defaultIgnoreTags1_4) { + return DefaultIncludeList + } + return convertIgnoreTagsToIncludeList(ignoreTags) + } + return *includeList +} + +func GetAllNames() []string { + names := []string{} + for i := range predefinedMetrics { + names = append(names, predefinedMetrics[i].Name) + } + return names +} + +func GetDefinitions(names []string) []flpapi.PromMetricsItem { + ret := []flpapi.PromMetricsItem{} + for i := range predefinedMetrics { + for _, name := range names { + if predefinedMetrics[i].Name == name { + ret = append(ret, predefinedMetrics[i].PromMetricsItem) + } + } + } + return ret +} diff --git a/pkg/metrics/predefined_metrics_test.go b/pkg/metrics/predefined_metrics_test.go new file mode 100644 index 000000000..e2a2108ef --- /dev/null +++ b/pkg/metrics/predefined_metrics_test.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIncludeExclude(t *testing.T) { + assert := assert.New(t) + + // IgnoreTags set, Include list unset => resolving ignore tags + res := GetEnabledNames([]string{"egress", "packets", "flows"}, nil) + assert.Equal([]string{"node_ingress_bytes_total", "namespace_ingress_bytes_total", "workload_ingress_bytes_total"}, res) + + // IgnoreTags set, Include list set => keep include list + res = 
GetEnabledNames([]string{"egress", "packets"}, &[]string{"namespace_flows_total"}) + assert.Equal([]string{"namespace_flows_total"}, res) + + // IgnoreTags set as defaults, Include list unset => use default include list + res = GetEnabledNames([]string{"egress", "packets", "nodes-flows", "namespaces-flows", "workloads-flows", "namespaces"}, nil) + assert.Equal(DefaultIncludeList, res) + + // IgnoreTags set as defaults, Include list set => use include list + res = GetEnabledNames([]string{"egress", "packets", "nodes-flows", "namespaces-flows", "workloads-flows", "namespaces"}, &[]string{"namespace_flows_total"}) + assert.Equal([]string{"namespace_flows_total"}, res) +} + +func TestGetDefinitions(t *testing.T) { + assert := assert.New(t) + + res := GetDefinitions([]string{"namespace_flows_total", "node_ingress_bytes_total", "workload_egress_packets_total"}) + assert.Len(res, 3) + assert.Equal("node_ingress_bytes_total", res[0].Name) + assert.Equal("Bytes", res[0].ValueKey) + assert.Equal([]string{"SrcK8S_HostName", "DstK8S_HostName"}, res[0].Labels) + assert.Equal("namespace_flows_total", res[1].Name) + assert.Empty(res[1].ValueKey) + assert.Equal([]string{"SrcK8S_Namespace", "DstK8S_Namespace"}, res[1].Labels) + assert.Equal("workload_egress_packets_total", res[2].Name) + assert.Equal("Packets", res[2].ValueKey) + assert.Equal([]string{"SrcK8S_Namespace", "DstK8S_Namespace", "SrcK8S_OwnerName", "DstK8S_OwnerName", "SrcK8S_OwnerType", "DstK8S_OwnerType"}, res[2].Labels) +} diff --git a/pkg/test/dashboards.go b/pkg/test/dashboards.go new file mode 100644 index 000000000..30445320e --- /dev/null +++ b/pkg/test/dashboards.go @@ -0,0 +1,33 @@ +package test + +import ( + "encoding/json" +) + +type Dashboard struct { + Rows []struct { + Panels []struct { + Targets []struct { + Expr string `json:"expr"` + LegendFormat string `json:"legendFormat"` + } `json:"targets"` + Title string `json:"title"` + } `json:"panels"` + Title string `json:"title"` + } `json:"rows"` + Title 
string `json:"title"` +} + +func DashboardFromBytes(b []byte) (*Dashboard, error) { + var d Dashboard + err := json.Unmarshal(b, &d) + return &d, err +} + +func (d *Dashboard) Titles() []string { + var titles []string + for _, r := range d.Rows { + titles = append(titles, r.Title) + } + return titles +}