From 24c78266cd579efc990f817b01f32f2fb18bdd09 Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Fri, 3 Nov 2023 12:43:06 -0400 Subject: [PATCH 01/16] Update logTypes enum --- api/v1beta2/flowcollector_types.go | 20 +++++++++---------- .../flows.netobserv.io_flowcollectors.yaml | 18 ++++++++--------- ...observ-operator.clusterserviceversion.yaml | 2 +- .../flows.netobserv.io_flowcollectors.yaml | 18 ++++++++--------- .../samples/flows_v1beta2_flowcollector.yaml | 2 +- docs/FlowCollector.md | 6 +++--- ...ned.flows.netobserv.io_flowcollectors.yaml | 12 +++++------ 7 files changed, 39 insertions(+), 39 deletions(-) diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index d5e6fc1ad..3180a6887 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -363,10 +363,10 @@ type FLPMetrics struct { } const ( - LogTypeFlows = "FLOWS" - LogTypeConversations = "CONVERSATIONS" - LogTypeEndedConversations = "ENDED_CONVERSATIONS" - LogTypeAll = "ALL" + LogTypeFlows = "Flows" + LogTypeConversations = "Conversations" + LogTypeEndedConversations = "EndedConversations" + LogTypeAll = "All" ) // `FlowCollectorFLP` defines the desired flowlogs-pipeline state of FlowCollector @@ -442,13 +442,13 @@ type FlowCollectorFLP struct { KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"` // `logTypes` defines the desired record types to generate. Possible values are:
- // - `FLOWS` (default) to export regular network flows
- // - `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates
- // - `ENDED_CONVERSATIONS` to generate only ended conversations events
- // - `ALL` to generate both network flows and all conversations events
+ // - `Flows` (default) to export regular network flows
+ // - `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates
+ // - `EndedConversations` to generate only ended conversations events
+ // - `All` to generate both network flows and all conversations events
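For illustration, the renamed values would be set in the processor section of the resource; a minimal sketch, assuming the `cluster` resource name, with the conversation timers copied from the sample file updated later in this patch:

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster                        # assumed resource name
spec:
  processor:
    logTypes: Conversations            # formerly CONVERSATIONS
    conversationHeartbeatInterval: 30s # timer values taken from the sample in this patch
    conversationEndTimeout: 10s
    conversationTerminatingTimeout: 5s
```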
// +kubebuilder:validation:Optional - // +kubebuilder:validation:Enum:="FLOWS";"CONVERSATIONS";"ENDED_CONVERSATIONS";"ALL" - // +kubebuilder:default:=FLOWS + // +kubebuilder:validation:Enum:="Flows";"Conversations";"EndedConversations";"All" + // +kubebuilder:default:=Flows LogTypes *string `json:"logTypes,omitempty"` //+kubebuilder:default:="30s" diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 278f34968..8597e2a90 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -7667,19 +7667,19 @@ spec: - panic type: string logTypes: - default: FLOWS + default: Flows description: '`logTypes` defines the desired record types to generate. - Possible values are:
- `FLOWS` (default) to export regular - network flows
- `CONVERSATIONS` to generate events for started + Possible values are:
- `Flows` (default) to export regular + network flows
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" - updates
- `ENDED_CONVERSATIONS` to generate only ended conversations - events
- `ALL` to generate both network flows and all conversations + updates
- `EndedConversations` to generate only ended conversations + events
- `All` to generate both network flows and all conversations events
' enum: - - FLOWS - - CONVERSATIONS - - ENDED_CONVERSATIONS - - ALL + - Flows + - Conversations + - EndedConversations + - All type: string metrics: description: '`Metrics` define the processor configuration regarding diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 3905d16a0..7ae5d4ed4 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -496,7 +496,7 @@ metadata: "kafkaConsumerQueueCapacity": 1000, "kafkaConsumerReplicas": 3, "logLevel": "info", - "logTypes": "FLOWS", + "logTypes": "Flows", "metrics": { "disableAlerts": [], "includeList": [ diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 72f629672..5ea78e498 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -7653,19 +7653,19 @@ spec: - panic type: string logTypes: - default: FLOWS + default: Flows description: '`logTypes` defines the desired record types to generate. - Possible values are:
- `FLOWS` (default) to export regular - network flows
- `CONVERSATIONS` to generate events for started + Possible values are:
- `Flows` (default) to export regular + network flows
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" - updates
- `ENDED_CONVERSATIONS` to generate only ended conversations - events
- `ALL` to generate both network flows and all conversations + updates
- `EndedConversations` to generate only ended conversations + events
- `All` to generate both network flows and all conversations events
' enum: - - FLOWS - - CONVERSATIONS - - ENDED_CONVERSATIONS - - ALL + - Flows + - Conversations + - EndedConversations + - All type: string metrics: description: '`Metrics` define the processor configuration regarding diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml index ba65a45e0..52143b988 100644 --- a/config/samples/flows_v1beta2_flowcollector.yaml +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -51,7 +51,7 @@ spec: kafkaConsumerAutoscaler: null kafkaConsumerQueueCapacity: 1000 kafkaConsumerBatchSize: 10485760 - logTypes: FLOWS + logTypes: Flows conversationTerminatingTimeout: 5s conversationHeartbeatInterval: 30s conversationEndTimeout: 10s diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index b5f52f3a4..7c267402e 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -12694,10 +12694,10 @@ TLS client configuration for Loki URL. logTypes enum - `logTypes` defines the desired record types to generate. Possible values are:
- `FLOWS` (default) to export regular network flows
- `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates
- `ENDED_CONVERSATIONS` to generate only ended conversations events
- `ALL` to generate both network flows and all conversations events

+ `logTypes` defines the desired record types to generate. Possible values are:
- `Flows` (default) to export regular network flows
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates
- `EndedConversations` to generate only ended conversations events
- `All` to generate both network flows and all conversations events


- Enum: FLOWS, CONVERSATIONS, ENDED_CONVERSATIONS, ALL
- Default: FLOWS
+ Enum: Flows, Conversations, EndedConversations, All
+ Default: Flows
false diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index 6d06de434..6407bf72b 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -5299,13 +5299,13 @@ spec: - panic type: string logTypes: - default: FLOWS - description: '`logTypes` defines the desired record types to generate. Possible values are:
- `FLOWS` (default) to export regular network flows
- `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates
- `ENDED_CONVERSATIONS` to generate only ended conversations events
- `ALL` to generate both network flows and all conversations events
' + default: Flows + description: '`logTypes` defines the desired record types to generate. Possible values are:
- `Flows` (default) to export regular network flows
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates
- `EndedConversations` to generate only ended conversations events
- `All` to generate both network flows and all conversations events
' enum: - - FLOWS - - CONVERSATIONS - - ENDED_CONVERSATIONS - - ALL + - Flows + - Conversations + - EndedConversations + - All type: string metrics: description: '`Metrics` define the processor configuration regarding metrics' From 8dc2ce38ae05144319b438b33798775e0940080b Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Fri, 3 Nov 2023 17:09:52 -0400 Subject: [PATCH 02/16] Update camelCase for other fields --- api/v1alpha1/flowcollector_webhook.go | 10 +- api/v1alpha1/zz_generated.conversion.go | 162 +---------------- api/v1beta1/zz_generated.conversion.go | 168 +----------------- api/v1beta2/flowcollector_types.go | 140 +++++++-------- api/v1beta2/zz_generated.deepcopy.go | 28 +-- .../flows.netobserv.io_flowcollectors.yaml | 134 +++++++------- ...observ-operator.clusterserviceversion.yaml | 14 +- .../flows.netobserv.io_flowcollectors.yaml | 134 +++++++------- config/descriptions/ocp.md | 4 +- config/descriptions/upstream.md | 4 +- config/manager/kustomization.yaml | 4 +- .../samples/flows_v1beta2_flowcollector.yaml | 10 +- .../consoleplugin/consoleplugin_objects.go | 8 +- controllers/ebpf/agent_controller.go | 58 +++--- .../ebpf/internal/permissions/permissions.go | 6 +- controllers/flowcollector_controller.go | 6 +- .../flowcollector_controller_ebpf_test.go | 10 +- .../flowcollector_controller_iso_test.go | 16 +- controllers/flowcollector_controller_test.go | 14 +- .../flowlogspipeline/flp_common_objects.go | 18 +- .../flowlogspipeline/flp_ingest_objects.go | 2 +- .../flowlogspipeline/flp_ingest_reconciler.go | 2 +- .../flowlogspipeline/flp_monolith_objects.go | 2 +- controllers/flowlogspipeline/flp_test.go | 4 +- .../flowlogspipeline/flp_transfo_objects.go | 2 +- controllers/ovs/flowsconfig_cno_reconciler.go | 10 +- .../ovs/flowsconfig_ovnk_reconciler.go | 18 +- controllers/ovs/flowsconfig_types.go | 4 +- docs/FlowCollector.md | 90 +++++----- ...ned.flows.netobserv.io_flowcollectors.yaml | 104 +++++------ pkg/helper/flowcollector.go | 24 +-- 31 files changed, 452 insertions(+), 758 deletions(-) diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index 45dabf072..01183b758 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -46,9 +46,9 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { } // Agent - if restored.Spec.Agent.EBPF.Features != nil { - dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) - copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) + if restored.Spec.Agent.Ebpf.Features != nil { + dst.Spec.Agent.Ebpf.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.Ebpf.Features)) + copy(dst.Spec.Agent.Ebpf.Features, restored.Spec.Agent.Ebpf.Features) } // Processor @@ -169,8 +169,8 @@ func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsole // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta1 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) +func Convert_v1beta2_FlowCollectorEbpf_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEbpf, out *FlowCollectorEBPF, s apiconversion.Scope) error { + return 
autoConvert_v1beta2_FlowCollectorEbpf_To_v1alpha1_FlowCollectorEBPF(in, out, s) } // // This function need to be manually created because conversion-gen not able to create it intentionally because diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 7f3b86a99..d91507131 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -123,11 +123,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { @@ -153,26 +148,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) }); err != nil { @@ -278,11 +253,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), 
func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) }); err != nil { @@ -506,36 +476,18 @@ func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCol func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type + // WARNING: in.EBPF requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + // WARNING: in.Ipfix requires manual conversion: does not exist in peer-type + // WARNING: in.Ebpf requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s) -} - func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { if err := v1.Convert_bool_To_Pointer_bool(&in.Register, &out.Register, s); err != nil { return err @@ -584,52 +536,12 @@ func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorCon return nil } -func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - // WARNING: in.Features requires manual conversion: does not exist in peer-type - return nil -} - func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { out.Type = v1beta2.ExporterType(in.Type) if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -645,7 +557,7 @@ func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIpfixReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -752,68 +664,6 @@ func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.F return autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) } -func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in, out, s) -} - func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { out.Address = in.Address out.Topic = in.Topic diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index e150d3944..5e3e48dd0 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -128,16 +128,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { @@ -163,26 +153,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { return 
Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) }); err != nil { @@ -504,36 +474,18 @@ func Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowColl func autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type + // WARNING: in.EBPF requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { out.Type = in.Type - if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + // WARNING: in.Ipfix requires manual conversion: does not exist in peer-type + // WARNING: in.Ebpf requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in, out, s) -} - func autoConvert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { out.Enable = (*bool)(unsafe.Pointer(in.Enable)) out.Register = (*bool)(unsafe.Pointer(in.Register)) @@ -580,58 +532,12 @@ func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsoleP return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) } -func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - out.Features = *(*[]v1beta2.AgentFeature)(unsafe.Pointer(&in.Features)) - return nil -} - -// Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - out.Features = *(*[]AgentFeature)(unsafe.Pointer(&in.Features)) - return nil -} - -// Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) -} - func autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { out.Type = v1beta2.ExporterType(in.Type) if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -647,7 +553,7 @@ func autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter( if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIpfixReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -747,68 +653,6 @@ func Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.Fl return autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) } -func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in, out, s) -} - func autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { out.Address = in.Address out.Topic = in.Topic diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 3180a6887..98fad9620 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -24,10 +24,10 @@ import ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. const ( - AgentIPFIX = "IPFIX" - AgentEBPF = "EBPF" - DeploymentModelDirect = "DIRECT" - DeploymentModelKafka = "KAFKA" + AgentIpfix = "Ipfix" + AgentEbpf = "Ebpf" + DeploymentModelDirect = "Direct" + DeploymentModelKafka = "Kafka" ) // Please notice that the FlowCollectorSpec's properties MUST redefine one of the default @@ -64,15 +64,15 @@ type FlowCollectorSpec struct { ConsolePlugin FlowCollectorConsolePlugin `json:"consolePlugin,omitempty"` // `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- // - `DIRECT` (default) to make the flow processor listening directly from the agents.
- // - `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor.
+ // - `Direct` (default) to make the flow processor listen directly from the agents.<br>
+ // - `Kafka` to send flows to a Kafka pipeline before consumption by the processor.<br>
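For illustration, a minimal sketch of the renamed model (the Kafka coordinates are placeholders; per the comment continuing below, `spec.kafka` is only read when the `Kafka` model is selected):

```yaml
spec:
  deploymentModel: Kafka      # formerly KAFKA
  kafka:
    address: kafka-bootstrap.netobserv:9092   # placeholder broker address
    topic: network-flows                      # placeholder topic name
```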
// Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). // +unionDiscriminator - // +kubebuilder:validation:Enum:="DIRECT";"KAFKA" - // +kubebuilder:default:=DIRECT + // +kubebuilder:validation:Enum:="Direct";"Kafka" + // +kubebuilder:default:=Direct DeploymentModel string `json:"deploymentModel,omitempty"` - // Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`. + // Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`. // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` @@ -86,30 +86,30 @@ type FlowCollectorSpec struct { // +union type FlowCollectorAgent struct { // `type` selects the flows tracing agent. Possible values are:
- // - `EBPF` (default) to use NetObserv eBPF agent.
- // - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- // `EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. - // `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, + // - `Ebpf` (default) to use NetObserv eBPF agent.
+ // - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
+ // `Ebpf` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. + // `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, // but they would require manual configuration). // +unionDiscriminator - // +kubebuilder:validation:Enum:="EBPF";"IPFIX" - // +kubebuilder:default:=EBPF + // +kubebuilder:validation:Enum:="Ebpf";"Ipfix" + // +kubebuilder:default:=Ebpf Type string `json:"type,omitempty"` - // `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` - // is set to `IPFIX`. + // `ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` + // is set to `Ipfix`. // +optional - IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` + Ipfix FlowCollectorIpfix `json:"ipfix,omitempty"` // `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` - // is set to `EBPF`. + // is set to `Ebpf`. // +optional - EBPF FlowCollectorEBPF `json:"ebpf,omitempty"` + Ebpf FlowCollectorEbpf `json:"ebpf,omitempty"` } -// `FlowCollectorIPFIX` defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the +// `FlowCollectorIpfix` defines a FlowCollector that uses Ipfix on OVN-Kubernetes to collect the // flows information -type FlowCollectorIPFIX struct { +type FlowCollectorIpfix struct { // Important: Run "make generate" to regenerate code after modifying this file //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ @@ -127,12 +127,12 @@ type FlowCollectorIPFIX struct { // `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. // To ensure cluster stability, it is not possible to set a value below 2. // If you really want to sample every packet, which might impact the cluster stability, - // refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX. + // refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix. Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"` //+kubebuilder:default:=false - // `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. - // It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. + // `forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. + // It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. // If you REALLY want to do that, set this flag to `true`. Use at your own risk. // When it is set to `true`, the value of `sampling` is ignored. ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"` @@ -140,7 +140,7 @@ type FlowCollectorIPFIX struct { // `clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available. ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"` - // `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. + // `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. 
OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"` } @@ -157,8 +157,8 @@ const ( FlowRTT AgentFeature = "FlowRTT" ) -// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information -type FlowCollectorEBPF struct { +// `FlowCollectorEbpf` defines a FlowCollector that uses eBPF to collect the flows information +type FlowCollectorEbpf struct { // Important: Run "make generate" to regenerate code after modifying this file //+kubebuilder:validation:Enum=IfNotPresent;Always;Never @@ -263,15 +263,15 @@ type FlowCollectorKafka struct { SASL SASLConfig `json:"sasl"` } -type FlowCollectorIPFIXReceiver struct { +type FlowCollectorIpfixReceiver struct { //+kubebuilder:default:="" - // Address of the IPFIX external receiver + // Address of the Ipfix external receiver TargetHost string `json:"targetHost"` - // Port for the IPFIX external receiver + // Port for the Ipfix external receiver TargetPort int `json:"targetPort"` - // Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. + // Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`. // +unionDiscriminator // +kubebuilder:validation:Enum:="TCP";"UDP" // +optional @@ -279,9 +279,9 @@ type FlowCollectorIPFIXReceiver struct { } const ( - ServerTLSDisabled = "DISABLED" - ServerTLSProvided = "PROVIDED" - ServerTLSAuto = "AUTO" + ServerTLSDisabled = "Disabled" + ServerTLSProvided = "Provided" + ServerTLSAuto = "Auto" ) type ServerTLSConfigType string @@ -289,16 +289,16 @@ type ServerTLSConfigType string // `ServerTLS` define the TLS configuration, server side type ServerTLS struct { // Select the type of TLS configuration:
- // - `DISABLED` (default) to not configure TLS for the endpoint. - // - `PROVIDED` to manually provide cert file and a key file. - // - `AUTO` to use OpenShift auto generated certificate using annotations. + // - `Disabled` (default) to not configure TLS for the endpoint. + // - `Provided` to manually provide cert file and a key file. + // - `Auto` to use OpenShift auto generated certificate using annotations. // +unionDiscriminator - // +kubebuilder:validation:Enum:="DISABLED";"PROVIDED";"AUTO" + // +kubebuilder:validation:Enum:="Disabled";"Provided";"Auto" // +kubebuilder:validation:Required - //+kubebuilder:default:="DISABLED" + //+kubebuilder:default:="Disabled" Type ServerTLSConfigType `json:"type,omitempty"` - // TLS configuration when `type` is set to `PROVIDED`. + // TLS configuration when `type` is set to `Provided`. // +optional Provided *CertificateReference `json:"provided"` @@ -307,7 +307,7 @@ type ServerTLS struct { // If set to `true`, the `providedCaFile` field is ignored. InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` - // Reference to the CA file when `type` is set to `PROVIDED`. + // Reference to the CA file when `type` is set to `Provided`. // +optional ProvidedCaFile *FileReference `json:"providedCaFile,omitempty"` } @@ -480,16 +480,16 @@ type FlowCollectorFLP struct { } const ( - HPAStatusDisabled = "DISABLED" - HPAStatusEnabled = "ENABLED" + HPAStatusDisabled = "Disabled" + HPAStatusEnabled = "Enabled" ) type FlowCollectorHPA struct { - // +kubebuilder:validation:Enum:=DISABLED;ENABLED - // +kubebuilder:default:=DISABLED + // +kubebuilder:validation:Enum:=Disabled;Enabled + // +kubebuilder:default:=Disabled // `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- // - `DISABLED` does not deploy an horizontal pod autoscaler.
- // - `ENABLED` deploys an horizontal pod autoscaler.
+ // - `Disabled` does not deploy an horizontal pod autoscaler.
+ // - `Enabled` deploys an horizontal pod autoscaler.
Status string `json:"status,omitempty"` // `minReplicas` is the lower limit for the number of replicas to which the autoscaler @@ -511,9 +511,9 @@ type FlowCollectorHPA struct { } const ( - LokiAuthDisabled = "DISABLED" - LokiAuthUseHostToken = "HOST" - LokiAuthForwardUserToken = "FORWARD" + LokiAuthDisabled = "Disabled" + LokiAuthUseHostToken = "Host" + LokiAuthForwardUserToken = "Forward" ) // `LokiManualParams` defines the full connection parameters to Loki. @@ -544,13 +544,13 @@ type LokiManualParams struct { // When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode. TenantID string `json:"tenantID,omitempty"` - //+kubebuilder:validation:Enum:="DISABLED";"HOST";"FORWARD" - //+kubebuilder:default:="DISABLED" + //+kubebuilder:validation:Enum:="Disabled";"Host";"Forward" + //+kubebuilder:default:="Disabled" // `authToken` describes the way to get a token to authenticate to Loki.
- // - `DISABLED` does not send any token with the request.
- // - `FORWARD` forwards the user token for authorization.
- // - `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
- // When using the Loki Operator, this must be set to `FORWARD`. + // - `Disabled` does not send any token with the request.
+ // - `Forward` forwards the user token for authorization.
+ // - `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
+ // When using the Loki Operator, this must be set to `Forward`. AuthToken string `json:"authToken,omitempty"` // TLS client configuration for Loki URL. @@ -866,16 +866,16 @@ type ClientTLS struct { type SASLType string const ( - SASLDisabled SASLType = "DISABLED" - SASLPlain SASLType = "PLAIN" - SASLScramSHA512 SASLType = "SCRAM-SHA512" + SASLDisabled SASLType = "Disabled" + SASLPlain SASLType = "Plain" + SASLScramSHA512 SASLType = "ScramSHA512" ) // `SASLConfig` defines SASL configuration type SASLConfig struct { - //+kubebuilder:validation:Enum=DISABLED;PLAIN;SCRAM-SHA512 - //+kubebuilder:default:=DISABLED - // Type of SASL authentication to use, or `DISABLED` if SASL is not used + //+kubebuilder:validation:Enum=Disabled;Plain;ScramSHA512 + //+kubebuilder:default:=Disabled + // Type of SASL authentication to use, or `Disabled` if SASL is not used Type SASLType `json:"type,omitempty"` // Reference to the secret or config map containing the client ID @@ -900,15 +900,15 @@ type DebugConfig struct { type ExporterType string const ( - KafkaExporter ExporterType = "KAFKA" - IpfixExporter ExporterType = "IPFIX" + KafkaExporter ExporterType = "Kafka" + IpfixExporter ExporterType = "Ipfix" ) // `FlowCollectorExporter` defines an additional exporter to send enriched flows to. type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. + // `type` selects the type of exporters. The available options are `Kafka` and `Ipfix`. // +unionDiscriminator - // +kubebuilder:validation:Enum:="KAFKA";"IPFIX" + // +kubebuilder:validation:Enum:="Kafka";"Ipfix" // +kubebuilder:validation:Required Type ExporterType `json:"type"` @@ -916,9 +916,9 @@ type FlowCollectorExporter struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. + // Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. // +optional - IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` + IPFIX FlowCollectorIpfixReceiver `json:"ipfix,omitempty"` } // `FlowCollectorStatus` defines the observed state of FlowCollector @@ -936,7 +936,7 @@ type FlowCollectorStatus struct { // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster // +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type` -// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` +// +kubebuilder:printcolumn:name="Sampling (Ebpf)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason" // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 74f820cfc..d1a48062b 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -198,8 +198,8 @@ func (in *FlowCollector) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { *out = *in - out.IPFIX = in.IPFIX - in.EBPF.DeepCopyInto(&out.EBPF) + out.Ipfix = in.Ipfix + in.Ebpf.DeepCopyInto(&out.Ebpf) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. @@ -253,7 +253,7 @@ func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { +func (in *FlowCollectorEbpf) DeepCopyInto(out *FlowCollectorEbpf) { *out = *in in.Resources.DeepCopyInto(&out.Resources) if in.Sampling != nil { @@ -279,12 +279,12 @@ func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. -func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEbpf. +func (in *FlowCollectorEbpf) DeepCopy() *FlowCollectorEbpf { if in == nil { return nil } - out := new(FlowCollectorEBPF) + out := new(FlowCollectorEbpf) in.DeepCopyInto(out) return out } @@ -388,33 +388,33 @@ func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { +func (in *FlowCollectorIpfix) DeepCopyInto(out *FlowCollectorIpfix) { *out = *in out.ClusterNetworkOperator = in.ClusterNetworkOperator out.OVNKubernetes = in.OVNKubernetes } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. -func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIpfix. +func (in *FlowCollectorIpfix) DeepCopy() *FlowCollectorIpfix { if in == nil { return nil } - out := new(FlowCollectorIPFIX) + out := new(FlowCollectorIpfix) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { +func (in *FlowCollectorIpfixReceiver) DeepCopyInto(out *FlowCollectorIpfixReceiver) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. -func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIpfixReceiver. 
+func (in *FlowCollectorIpfixReceiver) DeepCopy() *FlowCollectorIpfixReceiver { if in == nil { return nil } - out := new(FlowCollectorIPFIXReceiver) + out := new(FlowCollectorIpfixReceiver) in.DeepCopyInto(out) return out } diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 8597e2a90..fd10ccadb 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -5108,7 +5108,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) + name: Sampling (Ebpf) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -5148,7 +5148,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `EBPF`.' + flow reporter when `spec.agent.type` is set to `Ebpf`.' properties: cacheActiveTimeout: default: 5s @@ -5339,8 +5339,8 @@ spec: type: object ipfix: description: '`ipfix` [deprecated (*)] - describes the settings - related to the IPFIX-based flow reporter when `spec.agent.type` - is set to `IPFIX`.' + related to the Ipfix-based flow reporter when `spec.agent.type` + is set to `Ipfix`.' properties: cacheActiveTimeout: default: 20s @@ -5369,8 +5369,8 @@ spec: forceSampleAll: default: false description: '`forceSampleAll` allows disabling sampling in - the IPFIX-based flow reporter. It is not recommended to - sample all the traffic with IPFIX, as it might generate + the Ipfix-based flow reporter. It is not recommended to + sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' @@ -5378,7 +5378,7 @@ spec: ovnKubernetes: description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is - used when using OVN''s IPFIX exports, without OpenShift. + used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: @@ -5404,23 +5404,23 @@ spec: it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you - can use the eBPF Agent instead of IPFIX.' + can use the eBPF Agent instead of Ipfix.' format: int32 minimum: 2 type: integer type: object type: - default: EBPF + default: Ebpf description: '`type` selects the flows tracing agent. Possible - values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `EBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` + values are:
- `Ebpf` (default) to use the NetObserv eBPF agent.<br>
+            - `Ipfix` [deprecated (*)] - to use the legacy IPFIX collector.<br>
+            `Ebpf` is recommended as it offers better performance and should
+            work regardless of the CNI installed on the cluster. `Ipfix`
             works with OVN-Kubernetes CNI (other CNIs could work if they
-            support exporting IPFIX, but they would require manual configuration).'
+            support exporting IPFIX, but they would require manual configuration).'
           enum:
-          - EBPF
-          - IPFIX
+          - Ebpf
+          - Ipfix
           type: string
       type: object
     consolePlugin:
@@ -5946,14 +5946,14 @@ spec:
               format: int32
               type: integer
             status:
-              default: DISABLED
+              default: Disabled
               description: '`status` describes the desired status regarding
-                deploying an horizontal pod autoscaler.<br> 
- `DISABLED` - does not deploy an horizontal pod autoscaler.
- `ENABLED`
+                deploying a horizontal pod autoscaler.<br> 
- `Disabled`
+                does not deploy a horizontal pod autoscaler.<br> 
- `Enabled` deploys an horizontal pod autoscaler.
' enum: - - DISABLED - - ENABLED + - Disabled + - Enabled type: string type: object enable: @@ -6128,16 +6128,16 @@ spec: type: object type: object deploymentModel: - default: DIRECT + default: Direct description: '`deploymentModel` defines the desired type of deployment - for flow processing. Possible values are:
- `DIRECT` (default) + for flow processing. Possible values are:
- `Direct` (default) to make the flow processor listening directly from the agents.
- - `KAFKA` to make flows sent to a Kafka pipeline before consumption + - `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' enum: - - DIRECT - - KAFKA + - Direct + - Kafka type: string exporters: description: '`exporters` define additional optional exporters for @@ -6147,19 +6147,19 @@ spec: to send enriched flows to.' properties: ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. + description: Ipfix configuration, such as the IP address and + port to send enriched Ipfix flows to. properties: targetHost: default: "" - description: Address of the IPFIX external receiver + description: Address of the Ipfix external receiver type: string targetPort: - description: Port for the IPFIX external receiver + description: Port for the Ipfix external receiver type: integer transport: description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. + for the Ipfix connection, defaults to `TCP`. enum: - TCP - UDP @@ -6239,13 +6239,13 @@ spec: type: string type: object type: - default: DISABLED + default: Disabled description: Type of SASL authentication to use, or - `DISABLED` if SASL is not used + `Disabled` if SASL is not used enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 + - Disabled + - Plain + - ScramSHA512 type: string type: object tls: @@ -6346,10 +6346,10 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`.' + options are `Kafka` and `Ipfix`.' enum: - - KAFKA - - IPFIX + - Kafka + - Ipfix type: string required: - type @@ -6358,7 +6358,7 @@ spec: kafka: description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` - is `KAFKA`. + is `Kafka`. properties: address: default: "" @@ -6422,13 +6422,13 @@ spec: type: string type: object type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` + default: Disabled + description: Type of SASL authentication to use, or `Disabled` if SASL is not used enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 + - Disabled + - Plain + - ScramSHA512 type: string type: object tls: @@ -6562,18 +6562,18 @@ spec: most flexible configuration. It is ignored for other modes. properties: authToken: - default: DISABLED + default: Disabled description: '`authToken` describes the way to get a token - to authenticate to Loki.
- `DISABLED` does not send - any token with the request.
- `FORWARD` forwards the - user token for authorization.
- `HOST` [deprecated (*)] + to authenticate to Loki.
- `Disabled` does not send + any token with the request.
- `Forward` forwards the + user token for authorization.
- `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set - to `FORWARD`.' + to `Forward`.' enum: - - DISABLED - - HOST - - FORWARD + - Disabled + - Host + - Forward type: string ingesterUrl: default: http://loki:3100/ @@ -7624,14 +7624,14 @@ spec: format: int32 type: integer status: - default: DISABLED + default: Disabled description: '`status` describes the desired status regarding - deploying an horizontal pod autoscaler.
- `DISABLED` - does not deploy an horizontal pod autoscaler.
- `ENABLED`
+                deploying a horizontal pod autoscaler.<br> 
- `Disabled`
+                does not deploy a horizontal pod autoscaler.<br> 
- `Enabled` deploys an horizontal pod autoscaler.
' enum: - - DISABLED - - ENABLED + - Disabled + - Enabled type: string type: object kafkaConsumerBatchSize: @@ -7741,7 +7741,7 @@ spec: type: boolean provided: description: TLS configuration when `type` is set - to `PROVIDED`. + to `Provided`. properties: certFile: description: '`certFile` defines the path to the @@ -7777,7 +7777,7 @@ spec: type: object providedCaFile: description: Reference to the CA file when `type` - is set to `PROVIDED`. + is set to `Provided`. properties: file: description: File name within the config map or @@ -7805,16 +7805,16 @@ spec: type: string type: object type: - default: DISABLED + default: Disabled description: Select the type of TLS configuration:
- - `DISABLED` (default) to not configure TLS for - the endpoint. - `PROVIDED` to manually provide cert - file and a key file. - `AUTO` to use OpenShift auto + - `Disabled` (default) to not configure TLS for + the endpoint. - `Provided` to manually provide cert + file and a key file. - `Auto` to use OpenShift auto generated certificate using annotations. enum: - - DISABLED - - PROVIDED - - AUTO + - Disabled + - Provided + - Auto type: string type: object type: object diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 7ae5d4ed4..a691119c5 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -378,7 +378,7 @@ metadata: }, "sampling": 50 }, - "type": "EBPF" + "type": "Ebpf" }, "consolePlugin": { "autoscaler": { @@ -396,7 +396,7 @@ metadata: } ], "minReplicas": 1, - "status": "DISABLED" + "status": "Disabled" }, "imagePullPolicy": "IfNotPresent", "logLevel": "info", @@ -440,7 +440,7 @@ metadata: ], "register": true }, - "deploymentModel": "DIRECT", + "deploymentModel": "Direct", "exporters": [], "kafka": { "address": "kafka-cluster-kafka-bootstrap.netobserv", @@ -526,7 +526,7 @@ metadata: capabilities: Seamless Upgrades categories: Monitoring console.openshift.io/plugins: '["netobserv-plugin"]' - containerImage: quay.io/netobserv/network-observability-operator:1.0.4 + containerImage: quay.io/amoghrd/network-observability-operator:main createdAt: ':created-at:' description: Network flows collector and monitoring solution operatorframework.io/suggested-namespace: openshift-netobserv-operator @@ -598,7 +598,7 @@ spec: ## Configuration - The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). + The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -616,7 +616,7 @@ spec: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. - - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). + - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. 
They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -886,7 +886,7 @@ spec: - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS - image: quay.io/netobserv/network-observability-operator:1.0.4 + image: quay.io/amoghrd/network-observability-operator:main imagePullPolicy: Always livenessProbe: httpGet: diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 5ea78e498..1b2fc4bb3 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -5094,7 +5094,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) + name: Sampling (Ebpf) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -5134,7 +5134,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `EBPF`.' + flow reporter when `spec.agent.type` is set to `Ebpf`.' properties: cacheActiveTimeout: default: 5s @@ -5325,8 +5325,8 @@ spec: type: object ipfix: description: '`ipfix` [deprecated (*)] - describes the settings - related to the IPFIX-based flow reporter when `spec.agent.type` - is set to `IPFIX`.' + related to the Ipfix-based flow reporter when `spec.agent.type` + is set to `Ipfix`.' properties: cacheActiveTimeout: default: 20s @@ -5355,8 +5355,8 @@ spec: forceSampleAll: default: false description: '`forceSampleAll` allows disabling sampling in - the IPFIX-based flow reporter. It is not recommended to - sample all the traffic with IPFIX, as it might generate + the Ipfix-based flow reporter. It is not recommended to + sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' @@ -5364,7 +5364,7 @@ spec: ovnKubernetes: description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is - used when using OVN''s IPFIX exports, without OpenShift. + used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: @@ -5390,23 +5390,23 @@ spec: it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you - can use the eBPF Agent instead of IPFIX.' + can use the eBPF Agent instead of Ipfix.' format: int32 minimum: 2 type: integer type: object type: - default: EBPF + default: Ebpf description: '`type` selects the flows tracing agent. Possible - values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `EBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` + values are:
- `Ebpf` (default) to use the NetObserv eBPF agent.<br>
+            - `Ipfix` [deprecated (*)] - to use the legacy IPFIX collector.<br>
+            `Ebpf` is recommended as it offers better performance and should
+            work regardless of the CNI installed on the cluster. `Ipfix`
             works with OVN-Kubernetes CNI (other CNIs could work if they
-            support exporting IPFIX, but they would require manual configuration).'
+            support exporting IPFIX, but they would require manual configuration).'
           enum:
-          - EBPF
-          - IPFIX
+          - Ebpf
+          - Ipfix
           type: string
       type: object
     consolePlugin:
@@ -5932,14 +5932,14 @@ spec:
               format: int32
               type: integer
             status:
-              default: DISABLED
+              default: Disabled
               description: '`status` describes the desired status regarding
-                deploying an horizontal pod autoscaler.<br> 
- `DISABLED` - does not deploy an horizontal pod autoscaler.
- `ENABLED`
+                deploying a horizontal pod autoscaler.<br> 
- `Disabled`
+                does not deploy a horizontal pod autoscaler.<br> 
- `Enabled` deploys an horizontal pod autoscaler.
' enum: - - DISABLED - - ENABLED + - Disabled + - Enabled type: string type: object enable: @@ -6114,16 +6114,16 @@ spec: type: object type: object deploymentModel: - default: DIRECT + default: Direct description: '`deploymentModel` defines the desired type of deployment - for flow processing. Possible values are:
- `DIRECT` (default) + for flow processing. Possible values are:
- `Direct` (default) to make the flow processor listening directly from the agents.
- - `KAFKA` to make flows sent to a Kafka pipeline before consumption + - `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' enum: - - DIRECT - - KAFKA + - Direct + - Kafka type: string exporters: description: '`exporters` define additional optional exporters for @@ -6133,19 +6133,19 @@ spec: to send enriched flows to.' properties: ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. + description: Ipfix configuration, such as the IP address and + port to send enriched Ipfix flows to. properties: targetHost: default: "" - description: Address of the IPFIX external receiver + description: Address of the Ipfix external receiver type: string targetPort: - description: Port for the IPFIX external receiver + description: Port for the Ipfix external receiver type: integer transport: description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. + for the Ipfix connection, defaults to `TCP`. enum: - TCP - UDP @@ -6225,13 +6225,13 @@ spec: type: string type: object type: - default: DISABLED + default: Disabled description: Type of SASL authentication to use, or - `DISABLED` if SASL is not used + `Disabled` if SASL is not used enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 + - Disabled + - Plain + - ScramSHA512 type: string type: object tls: @@ -6332,10 +6332,10 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`.' + options are `Kafka` and `Ipfix`.' enum: - - KAFKA - - IPFIX + - Kafka + - Ipfix type: string required: - type @@ -6344,7 +6344,7 @@ spec: kafka: description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` - is `KAFKA`. + is `Kafka`. properties: address: default: "" @@ -6408,13 +6408,13 @@ spec: type: string type: object type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` + default: Disabled + description: Type of SASL authentication to use, or `Disabled` if SASL is not used enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 + - Disabled + - Plain + - ScramSHA512 type: string type: object tls: @@ -6548,18 +6548,18 @@ spec: most flexible configuration. It is ignored for other modes. properties: authToken: - default: DISABLED + default: Disabled description: '`authToken` describes the way to get a token - to authenticate to Loki.
- `DISABLED` does not send - any token with the request.
- `FORWARD` forwards the - user token for authorization.
- `HOST` [deprecated (*)] + to authenticate to Loki.
- `Disabled` does not send + any token with the request.
- `Forward` forwards the + user token for authorization.
- `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set - to `FORWARD`.' + to `Forward`.' enum: - - DISABLED - - HOST - - FORWARD + - Disabled + - Host + - Forward type: string ingesterUrl: default: http://loki:3100/ @@ -7610,14 +7610,14 @@ spec: format: int32 type: integer status: - default: DISABLED + default: Disabled description: '`status` describes the desired status regarding - deploying an horizontal pod autoscaler.
- `DISABLED` - does not deploy an horizontal pod autoscaler.
- `ENABLED`
+                deploying a horizontal pod autoscaler.<br> 
- `Disabled`
+                does not deploy a horizontal pod autoscaler.<br> 
- `Enabled` deploys an horizontal pod autoscaler.
' enum: - - DISABLED - - ENABLED + - Disabled + - Enabled type: string type: object kafkaConsumerBatchSize: @@ -7727,7 +7727,7 @@ spec: type: boolean provided: description: TLS configuration when `type` is set - to `PROVIDED`. + to `Provided`. properties: certFile: description: '`certFile` defines the path to the @@ -7763,7 +7763,7 @@ spec: type: object providedCaFile: description: Reference to the CA file when `type` - is set to `PROVIDED`. + is set to `Provided`. properties: file: description: File name within the config map or @@ -7791,16 +7791,16 @@ spec: type: string type: object type: - default: DISABLED + default: Disabled description: Select the type of TLS configuration:
- - `DISABLED` (default) to not configure TLS for - the endpoint. - `PROVIDED` to manually provide cert - file and a key file. - `AUTO` to use OpenShift auto + - `Disabled` (default) to not configure TLS for + the endpoint. - `Provided` to manually provide cert + file and a key file. - `Auto` to use OpenShift auto generated certificate using annotations. enum: - - DISABLED - - PROVIDED - - AUTO + - Disabled + - Provided + - Auto type: string type: object type: object diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md index 6e2e48cbc..e0e86d923 100644 --- a/config/descriptions/ocp.md +++ b/config/descriptions/ocp.md @@ -28,7 +28,7 @@ oc apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents/252b ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -44,7 +44,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. 
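As an illustration of the renamed values in practice, here is a minimal `FlowCollector` sketch. It reuses the sample values that appear elsewhere in this series (namespace `netobserv`, sampling 50, the `kafka-cluster-kafka-bootstrap.netobserv` broker and the `netobserv-flows-export` topic); adjust them for a real cluster.

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster
spec:
  namespace: netobserv
  # PascalCase enum values; the former upper-case spellings fail CRD validation
  deploymentModel: Kafka
  agent:
    type: Ebpf
    ebpf:
      sampling: 50
  kafka:
    address: "kafka-cluster-kafka-bootstrap.netobserv"
    topic: netobserv-flows-export
```
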
diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md index 04c610fcb..30e670499 100644 --- a/config/descriptions/upstream.md +++ b/config/descriptions/upstream.md @@ -32,7 +32,7 @@ kubectl apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -50,7 +50,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. 
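The `exporters` list follows the same spelling scheme. Below is a sketch based on the commented examples in the sample manifest; the IPFIX receiver host and port are placeholder values taken from that sample, not a tested endpoint.

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster
spec:
  namespace: netobserv
  exporters:
    - type: Kafka            # was KAFKA
      kafka:
        address: "kafka-cluster-kafka-bootstrap.netobserv"
        topic: netobserv-flows-export
    - type: Ipfix            # was IPFIX
      ipfix:
        targetHost: "ipfix-collector.ipfix.svc.cluster.local"
        targetPort: 4739
        transport: TCP       # defaults to TCP; UDP is the other accepted value
```
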
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index f69dc045d..fb1512921 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -14,7 +14,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/netobserv/network-observability-operator - newTag: 1.0.4 + newName: quay.io/amoghrd/network-observability-operator + newTag: main commonLabels: app: netobserv-operator diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml index 52143b988..9dc280d35 100644 --- a/config/samples/flows_v1beta2_flowcollector.yaml +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -4,9 +4,9 @@ metadata: name: cluster spec: namespace: netobserv - deploymentModel: DIRECT + deploymentModel: Direct agent: - type: EBPF + type: Ebpf ebpf: imagePullPolicy: IfNotPresent sampling: 50 @@ -99,7 +99,7 @@ spec: port: 9001 logLevel: info autoscaler: - status: DISABLED + status: Disabled minReplicas: 1 maxReplicas: 3 metrics: @@ -132,12 +132,12 @@ spec: filter: dst_kind: 'Service' exporters: [] - # - type: KAFKA + # - type: Kafka # kafka: # address: "kafka-cluster-kafka-bootstrap.netobserv" # topic: netobserv-flows-export # or - # - type: IPFIX + # - type: Ipfix # ipfix: # targetHost: "ipfix-collector.ipfix.svc.cluster.local" # targetPort: 4739 diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index a0e2543d1..8245c9724 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -344,16 +344,16 @@ func (b *builder) setLokiConfig(lconf *config.LokiConfig) { } func (b *builder) setFrontendConfig(fconf *config.FrontendConfig) { - if helper.UseEBPF(b.desired) { - if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { + if helper.UseEbpf(b.desired) { + if helper.IsPktDropEnabled(&b.desired.Agent.Ebpf) { fconf.Features = append(fconf.Features, "pktDrop") } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { + if helper.IsDNSTrackingEnabled(&b.desired.Agent.Ebpf) { fconf.Features = append(fconf.Features, "dnsTracking") } - if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { + if helper.IsFlowRTTEnabled(&b.desired.Agent.Ebpf) { fconf.Features = append(fconf.Features, "flowRTT") } } diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 2fbb72e14..5fcb357e7 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -105,7 +105,7 @@ func (c *AgentController) Reconcile( if err != nil { return fmt.Errorf("fetching current EBPF Agent: %w", err) } - if !helper.UseEBPF(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { + if !helper.UseEbpf(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { if current == nil { rlog.Info("nothing to do, as the requested agent is not eBPF", "currentAgent", target.Spec.Agent) @@ -125,7 +125,7 @@ func (c *AgentController) Reconcile( current = nil } - if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.EBPF); err != nil { + if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.Ebpf); err != nil { return fmt.Errorf("reconciling permissions: %w", err) } desired, err := c.desired(ctx, target, rlog) @@ -175,7 +175,7 @@ func newMountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropaga } func (c *AgentController) desired(ctx context.Context, coll 
*flowslatest.FlowCollector, rlog logr.Logger) (*v1.DaemonSet, error) { - if coll == nil || !helper.UseEBPF(&coll.Spec) { + if coll == nil || !helper.UseEbpf(&coll.Spec) { return nil, nil } version := helper.ExtractVersion(c.config.EBPFAgentImage) @@ -187,7 +187,7 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol volumeMounts := c.volumes.GetMounts() volumes := c.volumes.GetVolumes() - if helper.IsPrivileged(&coll.Spec.Agent.EBPF) { + if helper.IsPrivileged(&coll.Spec.Agent.Ebpf) { volume := corev1.Volume{ Name: bpfNetNSMountName, VolumeSource: corev1.VolumeSource{ @@ -206,8 +206,8 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol volumeMounts = append(volumeMounts, volumeMount) } - if helper.IsFeatureEnabled(&coll.Spec.Agent.EBPF, flowslatest.PacketDrop) { - if !coll.Spec.Agent.EBPF.Privileged { + if helper.IsFeatureEnabled(&coll.Spec.Agent.Ebpf, flowslatest.PacketDrop) { + if !coll.Spec.Agent.Ebpf.Privileged { rlog.Error(fmt.Errorf("invalid configuration"), "To use PacketsDrop feature privileged mode needs to be enabled") } else { volume := corev1.Volume{ @@ -257,8 +257,8 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol Containers: []corev1.Container{{ Name: constants.EBPFAgentName, Image: c.config.EBPFAgentImage, - ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.EBPF.ImagePullPolicy), - Resources: coll.Spec.Agent.EBPF.Resources, + ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.Ebpf.ImagePullPolicy), + Resources: coll.Spec.Agent.Ebpf.Resources, SecurityContext: c.securityContext(coll), Env: env, VolumeMounts: volumeMounts, @@ -277,9 +277,9 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC corev1.EnvVar{Name: envExport, Value: exportKafka}, corev1.EnvVar{Name: envKafkaBrokers, Value: coll.Spec.Kafka.Address}, corev1.EnvVar{Name: envKafkaTopic, Value: coll.Spec.Kafka.Topic}, - corev1.EnvVar{Name: envKafkaBatchSize, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize)}, + corev1.EnvVar{Name: envKafkaBatchSize, Value: strconv.Itoa(coll.Spec.Agent.Ebpf.KafkaBatchSize)}, // For easier user configuration, we can assume a constant message size per flow (~100B in protobuf) - corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize / averageMessageSize)}, + corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.Ebpf.KafkaBatchSize / averageMessageSize)}, ) if coll.Spec.Kafka.TLS.Enable { // Annotate pod with certificate reference so that it is reloaded if modified @@ -374,8 +374,8 @@ func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *core RunAsUser: ptr.To(int64(0)), } - if coll.Spec.Agent.EBPF.Privileged { - sc.Privileged = &coll.Spec.Agent.EBPF.Privileged + if coll.Spec.Agent.Ebpf.Privileged { + sc.Privileged = &coll.Spec.Agent.Ebpf.Privileged } else { sc.Capabilities = &corev1.Capabilities{Add: permissions.AllowedCapabilities} } @@ -386,42 +386,42 @@ func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *core func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1.EnvVar { var config []corev1.EnvVar - if coll.Spec.Agent.EBPF.CacheActiveTimeout != "" { + if coll.Spec.Agent.Ebpf.CacheActiveTimeout != "" { config = append(config, corev1.EnvVar{ Name: envCacheActiveTimeout, - Value: coll.Spec.Agent.EBPF.CacheActiveTimeout, + Value: coll.Spec.Agent.Ebpf.CacheActiveTimeout, }) } - if coll.Spec.Agent.EBPF.CacheMaxFlows != 
0 { + if coll.Spec.Agent.Ebpf.CacheMaxFlows != 0 { config = append(config, corev1.EnvVar{ Name: envCacheMaxFlows, - Value: strconv.Itoa(int(coll.Spec.Agent.EBPF.CacheMaxFlows)), + Value: strconv.Itoa(int(coll.Spec.Agent.Ebpf.CacheMaxFlows)), }) } - if coll.Spec.Agent.EBPF.LogLevel != "" { + if coll.Spec.Agent.Ebpf.LogLevel != "" { config = append(config, corev1.EnvVar{ Name: envLogLevel, - Value: coll.Spec.Agent.EBPF.LogLevel, + Value: coll.Spec.Agent.Ebpf.LogLevel, }) } - if len(coll.Spec.Agent.EBPF.Interfaces) > 0 { + if len(coll.Spec.Agent.Ebpf.Interfaces) > 0 { config = append(config, corev1.EnvVar{ Name: envInterfaces, - Value: strings.Join(coll.Spec.Agent.EBPF.Interfaces, envListSeparator), + Value: strings.Join(coll.Spec.Agent.Ebpf.Interfaces, envListSeparator), }) } - if len(coll.Spec.Agent.EBPF.ExcludeInterfaces) > 0 { + if len(coll.Spec.Agent.Ebpf.ExcludeInterfaces) > 0 { config = append(config, corev1.EnvVar{ Name: envExcludeInterfaces, - Value: strings.Join(coll.Spec.Agent.EBPF.ExcludeInterfaces, envListSeparator), + Value: strings.Join(coll.Spec.Agent.Ebpf.ExcludeInterfaces, envListSeparator), }) } - sampling := coll.Spec.Agent.EBPF.Sampling + sampling := coll.Spec.Agent.Ebpf.Sampling if sampling != nil && *sampling > 1 { config = append(config, corev1.EnvVar{ Name: envSampling, @@ -429,7 +429,7 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 }) } - if helper.IsFlowRTTEnabled(&coll.Spec.Agent.EBPF) { + if helper.IsFlowRTTEnabled(&coll.Spec.Agent.Ebpf) { config = append(config, corev1.EnvVar{ Name: envEnableFlowRTT, Value: "true", @@ -438,8 +438,8 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 // set GOMEMLIMIT which allows specifying a soft memory cap to force GC when resource limit is reached // to prevent OOM - if coll.Spec.Agent.EBPF.Resources.Limits.Memory() != nil { - if memLimit, ok := coll.Spec.Agent.EBPF.Resources.Limits.Memory().AsInt64(); ok { + if coll.Spec.Agent.Ebpf.Resources.Limits.Memory() != nil { + if memLimit, ok := coll.Spec.Agent.Ebpf.Resources.Limits.Memory().AsInt64(); ok { // we will set the GOMEMLIMIT to current memlimit - 10% as a headroom to account for // memory sources the Go runtime is unaware of memLimit -= int64(float64(memLimit) * 0.1) @@ -447,14 +447,14 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 } } - if helper.IsPktDropEnabled(&coll.Spec.Agent.EBPF) { + if helper.IsPktDropEnabled(&coll.Spec.Agent.Ebpf) { config = append(config, corev1.EnvVar{ Name: envEnablePktDrop, Value: "true", }) } - if helper.IsDNSTrackingEnabled(&coll.Spec.Agent.EBPF) { + if helper.IsDNSTrackingEnabled(&coll.Spec.Agent.Ebpf) { config = append(config, corev1.EnvVar{ Name: envEnableDNSTracking, Value: "true", @@ -465,7 +465,7 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 dedupJustMark := dedupeJustMarkDefault // we need to sort env map to keep idempotency, // as equal maps could be iterated in different order - for _, pair := range helper.KeySorted(coll.Spec.Agent.EBPF.Debug.Env) { + for _, pair := range helper.KeySorted(coll.Spec.Agent.Ebpf.Debug.Env) { k, v := pair[0], pair[1] if k == envDedupe { dedup = v diff --git a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index 4b41a8c31..76073be6a 100644 --- a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -33,7 +33,7 @@ func NewReconciler(cmn 
*reconcilers.Common) Reconciler { return Reconciler{Common: *cmn} } -func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEBPF) error { +func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEbpf) error { log.IntoContext(ctx, log.FromContext(ctx).WithName("permissions")) if err := c.reconcileNamespace(ctx); err != nil { @@ -121,7 +121,7 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { } func (c *Reconciler) reconcileVendorPermissions( - ctx context.Context, desired *flowslatest.FlowCollectorEBPF, + ctx context.Context, desired *flowslatest.FlowCollectorEbpf, ) error { if c.UseOpenShiftSCC { return c.reconcileOpenshiftPermissions(ctx, desired) @@ -130,7 +130,7 @@ func (c *Reconciler) reconcileVendorPermissions( } func (c *Reconciler) reconcileOpenshiftPermissions( - ctx context.Context, desired *flowslatest.FlowCollectorEBPF, + ctx context.Context, desired *flowslatest.FlowCollectorEbpf, ) error { rlog := log.FromContext(ctx, "securityContextConstraints", constants.EBPFSecurityContext) diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index 1239068b3..cac23d223 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -148,12 +148,12 @@ func (r *FlowCollectorReconciler) Reconcile(ctx context.Context, _ ctrl.Request) // OVS config map for CNO if r.availableAPIs.HasCNO() { - ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.IPFIX.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) + ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.Ipfix.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileCNOFailed(err), desired) } } else { - ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.IPFIX.OVNKubernetes) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.Ipfix.OVNKubernetes) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileOVNKFailed(err), desired) } @@ -380,7 +380,7 @@ func (r *FlowCollectorReconciler) finalize(ctx context.Context, desired *flowsla if !r.availableAPIs.HasCNO() { ns := getNamespaceName(desired) info := r.newCommonInfo(ctx, desired, ns, ns, nil, func(b bool) {}, func(b bool) {}) - ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.IPFIX.OVNKubernetes) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.Ipfix.OVNKubernetes) if err := ovsConfigController.Finalize(ctx, desired); err != nil { return fmt.Errorf("failed to finalize ovn-kubernetes reconciler: %w", err) } diff --git a/controllers/flowcollector_controller_ebpf_test.go b/controllers/flowcollector_controller_ebpf_test.go index 1626aa23a..fe9a2d32a 100644 --- a/controllers/flowcollector_controller_ebpf_test.go +++ b/controllers/flowcollector_controller_ebpf_test.go @@ -62,7 +62,7 @@ func flowCollectorEBPFSpecs() { }, Agent: flowslatest.FlowCollectorAgent{ Type: "EBPF", - EBPF: flowslatest.FlowCollectorEBPF{ + Ebpf: flowslatest.FlowCollectorEbpf{ Sampling: ptr.To(int32(123)), CacheActiveTimeout: "15s", CacheMaxFlows: 100, @@ -148,9 +148,9 @@ func flowCollectorEBPFSpecs() { It("Should update fields that have changed", 
func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - Expect(*fc.Spec.Agent.EBPF.Sampling).To(Equal(int32(123))) - *fc.Spec.Agent.EBPF.Sampling = 4 - fc.Spec.Agent.EBPF.Privileged = true + Expect(*fc.Spec.Agent.Ebpf.Sampling).To(Equal(int32(123))) + *fc.Spec.Agent.Ebpf.Sampling = 4 + fc.Spec.Agent.Ebpf.Privileged = true }) ds := appsv1.DaemonSet{} @@ -281,7 +281,7 @@ func flowCollectorEBPFKafkaSpecs() { ObjectMeta: metav1.ObjectMeta{Name: crKey.Name}, Spec: flowslatest.FlowCollectorSpec{ Namespace: operatorNamespace, - Agent: flowslatest.FlowCollectorAgent{Type: "EBPF"}, + Agent: flowslatest.FlowCollectorAgent{Type: "Ebpf"}, DeploymentModel: flowslatest.DeploymentModelKafka, Kafka: flowslatest.FlowCollectorKafka{ Address: "kafka-cluster-kafka-bootstrap", diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 563c85ead..1760063d9 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -64,7 +64,7 @@ func flowCollectorIsoSpecs() { LogLevel: "trace", Resources: v1.ResourceRequirements{Limits: nil, Requests: nil}, KafkaConsumerReplicas: &zero, - KafkaConsumerAutoscaler: flowslatest.FlowCollectorHPA{Status: "DISABLED", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}}, + KafkaConsumerAutoscaler: flowslatest.FlowCollectorHPA{Status: "Disabled", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}}, KafkaConsumerQueueCapacity: int(zero), KafkaConsumerBatchSize: int(zero), ConversationHeartbeatInterval: &metav1.Duration{Duration: time.Second}, @@ -77,7 +77,7 @@ func flowCollectorIsoSpecs() { Server: flowslatest.MetricsServerConfig{ Port: 12347, TLS: flowslatest.ServerTLS{ - Type: "DISABLED", + Type: "Disabled", Provided: nil, }, }, @@ -87,8 +87,8 @@ func flowCollectorIsoSpecs() { DropUnusedFields: ptr.To(false), }, Agent: flowslatest.FlowCollectorAgent{ - Type: "EBPF", - IPFIX: flowslatest.FlowCollectorIPFIX{ + Type: "Ebpf", + Ipfix: flowslatest.FlowCollectorIpfix{ Sampling: 2, // 0 is forbidden here CacheActiveTimeout: "5s", CacheMaxFlows: 100, @@ -102,7 +102,7 @@ func flowCollectorIsoSpecs() { ContainerName: "test", }, }, - EBPF: flowslatest.FlowCollectorEBPF{ + Ebpf: flowslatest.FlowCollectorEbpf{ Sampling: &zero, CacheActiveTimeout: "5s", CacheMaxFlows: 100, @@ -125,7 +125,7 @@ func flowCollectorIsoSpecs() { ImagePullPolicy: "Always", Resources: v1.ResourceRequirements{Limits: nil, Requests: nil}, LogLevel: "trace", - Autoscaler: flowslatest.FlowCollectorHPA{Status: "DISABLED", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}}, + Autoscaler: flowslatest.FlowCollectorHPA{Status: "Disabled", MinReplicas: &zero, MaxReplicas: zero, Metrics: []ascv2.MetricSpec{}}, PortNaming: flowslatest.ConsolePluginPortConfig{ Enable: ptr.To(false), PortNames: map[string]string{}, @@ -140,7 +140,7 @@ func flowCollectorIsoSpecs() { QuerierURL: "http://loki", StatusURL: "", TenantID: "test", - AuthToken: "DISABLED", + AuthToken: "Disabled", TLS: defaultTLS, StatusTLS: defaultTLS, }, @@ -172,7 +172,7 @@ func flowCollectorIsoSpecs() { Topic: "topic", TLS: defaultTLS, SASL: flowslatest.SASLConfig{ - Type: "DISABLED", + Type: "Disabled", ClientIDReference: flowslatest.FileReference{ Type: "configmap", Name: "", diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 6a10a58ee..028fa83bd 100644 --- a/controllers/flowcollector_controller_test.go +++ 
b/controllers/flowcollector_controller_test.go @@ -122,8 +122,8 @@ func flowCollectorControllerSpecs() { }, }, Agent: flowslatest.FlowCollectorAgent{ - Type: "IPFIX", - IPFIX: flowslatest.FlowCollectorIPFIX{ + Type: "Ipfix", + Ipfix: flowslatest.FlowCollectorIpfix{ Sampling: 200, }, }, @@ -283,7 +283,7 @@ func flowCollectorControllerSpecs() { }, } fc.Spec.Loki = flowslatest.FlowCollectorLoki{} - fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ + fc.Spec.Agent.Ipfix = flowslatest.FlowCollectorIpfix{ Sampling: 400, CacheActiveTimeout: "30s", CacheMaxFlows: 1000, @@ -399,7 +399,7 @@ func flowCollectorControllerSpecs() { if err := k8sClient.Get(ctx, crKey, &fc); err != nil { return err } - fc.Spec.Agent.IPFIX.Sampling = 1 + fc.Spec.Agent.Ipfix.Sampling = 1 return k8sClient.Update(ctx, &fc) }).Should(Satisfy(func(err error) bool { return err != nil && strings.Contains(err.Error(), "spec.agent.ipfix.sampling: Invalid value: 1") @@ -410,8 +410,8 @@ func flowCollectorControllerSpecs() { if err := k8sClient.Get(ctx, crKey, &fc); err != nil { return err } - fc.Spec.Agent.IPFIX.Sampling = 10 - fc.Spec.Agent.IPFIX.ForceSampleAll = true + fc.Spec.Agent.Ipfix.Sampling = 10 + fc.Spec.Agent.Ipfix.ForceSampleAll = true return k8sClient.Update(ctx, &fc) }).Should(Succeed()) @@ -888,7 +888,7 @@ func flowCollectorControllerSpecs() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { fc.Spec.Processor.Port = 9999 fc.Spec.Namespace = otherNamespace - fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ + fc.Spec.Agent.Ipfix = flowslatest.FlowCollectorIpfix{ Sampling: 200, } }) diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index 3216c6f2c..a10f47e55 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -117,7 +117,7 @@ func (b *builder) serviceMonitorName() string { return serviceMonitorName(b.conf func (b *builder) prometheusRuleName() string { return prometheusRuleName(b.confKind) } func (b *builder) portProtocol() corev1.Protocol { - if helper.UseEBPF(b.desired) { + if helper.UseEbpf(b.desired) { return corev1.ProtocolTCP } return corev1.ProtocolUDP @@ -393,7 +393,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P }, } - if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { + if helper.IsPktDropEnabled(&b.desired.Agent.Ebpf) { outputPktDropFields := []api.OutputField{ { Name: "PktDropBytes", @@ -425,7 +425,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P outputFields = append(outputFields, outputPktDropFields...) } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { + if helper.IsDNSTrackingEnabled(&b.desired.Agent.Ebpf) { outDNSTrackingFields := []api.OutputField{ { Name: "DnsFlagsResponseCode", @@ -439,7 +439,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P outputFields = append(outputFields, outDNSTrackingFields...) } - if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { + if helper.IsFlowRTTEnabled(&b.desired.Agent.Ebpf) { outputFields = append(outputFields, api.OutputField{ Name: "MaxTimeFlowRttNs", Operation: "max", @@ -524,7 +524,7 @@ func (b *builder) addTransformFilter(lastStage config.PipelineBuilderStage) conf // Filter-out unused fields? 
if helper.PtrBool(b.desired.Processor.DropUnusedFields) { - if helper.UseIPFIX(b.desired) { + if helper.UseIpfix(b.desired) { rules := filters.GetOVSGoflowUnusedRules() transformFilterRules = append(transformFilterRules, rules...) } @@ -544,7 +544,7 @@ func (b *builder) addCustomExportStages(enrichedStage *config.PipelineBuilderSta b.createKafkaWriteStage(fmt.Sprintf("kafka-export-%d", i), &exporter.Kafka, enrichedStage) } if exporter.Type == flowslatest.IpfixExporter { - createIPFIXWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) + createIpfixWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) } } } @@ -558,11 +558,11 @@ func (b *builder) createKafkaWriteStage(name string, spec *flowslatest.FlowColle }) } -func createIPFIXWriteStage(name string, spec *flowslatest.FlowCollectorIPFIXReceiver, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { +func createIpfixWriteStage(name string, spec *flowslatest.FlowCollectorIpfixReceiver, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { return fromStage.WriteIpfix(name, api.WriteIpfix{ TargetHost: spec.TargetHost, TargetPort: spec.TargetPort, - Transport: getIPFIXTransport(spec.Transport), + Transport: getIpfixTransport(spec.Transport), EnterpriseID: 2, }) } @@ -597,7 +597,7 @@ func (b *builder) getKafkaSASL(sasl *flowslatest.SASLConfig, volumePrefix string } } -func getIPFIXTransport(transport string) string { +func getIpfixTransport(transport string) string { switch transport { case "UDP": return "udp" diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go index a8c3cec8f..dc516caa8 100644 --- a/controllers/flowlogspipeline/flp_ingest_objects.go +++ b/controllers/flowlogspipeline/flp_ingest_objects.go @@ -51,7 +51,7 @@ func (b *ingestBuilder) configMap() (*corev1.ConfigMap, string, error) { func (b *ingestBuilder) buildPipelineConfig() ([]config.Stage, []config.StageParam, error) { var pipeline config.PipelineBuilderStage - if helper.UseIPFIX(b.generic.desired) { + if helper.UseIpfix(b.generic.desired) { // IPFIX collector pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{ Port: int(b.generic.desired.Processor.Port), diff --git a/controllers/flowlogspipeline/flp_ingest_reconciler.go b/controllers/flowlogspipeline/flp_ingest_reconciler.go index 195dd465b..f451b2ffd 100644 --- a/controllers/flowlogspipeline/flp_ingest_reconciler.go +++ b/controllers/flowlogspipeline/flp_ingest_reconciler.go @@ -79,7 +79,7 @@ func (r *flpIngesterReconciler) reconcile(ctx context.Context, desired *flowslat } // Ingester only used with Kafka and without eBPF - if !helper.UseKafka(&desired.Spec) || helper.UseEBPF(&desired.Spec) { + if !helper.UseKafka(&desired.Spec) || helper.UseEbpf(&desired.Spec) { r.Managed.TryDeleteAll(ctx) return nil } diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go index e846d1e07..c037bfc4d 100644 --- a/controllers/flowlogspipeline/flp_monolith_objects.go +++ b/controllers/flowlogspipeline/flp_monolith_objects.go @@ -52,7 +52,7 @@ func (b *monolithBuilder) configMap() (*corev1.ConfigMap, string, error) { func (b *monolithBuilder) buildPipelineConfig() ([]config.Stage, []config.StageParam, error) { var pipeline config.PipelineBuilderStage - if helper.UseIPFIX(b.generic.desired) { + if helper.UseIpfix(b.generic.desired) { // IPFIX collector pipeline = config.NewCollectorPipeline("ipfix", 
api.IngestCollector{ Port: int(b.generic.desired.Processor.Port), diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 04981295e..4f9944058 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -59,7 +59,7 @@ func getConfig(lokiMode ...string) flowslatest.FlowCollectorSpec { return flowslatest.FlowCollectorSpec{ DeploymentModel: flowslatest.DeploymentModelDirect, - Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIPFIX}, + Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIpfix}, Processor: flowslatest.FlowCollectorFLP{ Port: 2055, ImagePullPolicy: string(pullPolicy), @@ -967,7 +967,7 @@ func TestPipelineWithExporter(t *testing.T) { cfg.Exporters = append(cfg.Exporters, &flowslatest.FlowCollectorExporter{ Type: flowslatest.IpfixExporter, - IPFIX: flowslatest.FlowCollectorIPFIXReceiver{ + IPFIX: flowslatest.FlowCollectorIpfixReceiver{ TargetHost: "ipfix-receiver-test", TargetPort: 9999, Transport: "TCP", diff --git a/controllers/flowlogspipeline/flp_transfo_objects.go b/controllers/flowlogspipeline/flp_transfo_objects.go index 7cab884c6..603f052aa 100644 --- a/controllers/flowlogspipeline/flp_transfo_objects.go +++ b/controllers/flowlogspipeline/flp_transfo_objects.go @@ -57,7 +57,7 @@ func (b *transfoBuilder) buildPipelineConfig() ([]config.Stage, []config.StagePa // For now, we leave this communication via JSON and just setup protobuf ingestion when // the transformer is communicating directly via eBPF agent decoder := api.Decoder{Type: "protobuf"} - if helper.UseIPFIX(b.generic.desired) { + if helper.UseIpfix(b.generic.desired) { decoder = api.Decoder{Type: "json"} } pipeline := config.NewKafkaPipeline("kafka-read", api.IngestKafka{ diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go index 1916372d9..c8843dd49 100644 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ b/controllers/ovs/flowsconfig_cno_reconciler.go @@ -38,7 +38,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl if err != nil { return err } - if !helper.UseIPFIX(&target.Spec) { + if !helper.UseIpfix(&target.Spec) { if current == nil { return nil } @@ -58,7 +58,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl // compare current and desired if current == nil { - rlog.Info("Provided IPFIX configuration. Creating " + c.ovsConfigMapName + " ConfigMap") + rlog.Info("Provided Ipfix configuration. Creating " + c.ovsConfigMapName + " ConfigMap") cm, err := c.flowsConfigMap(desired) if err != nil { return err @@ -67,7 +67,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl } if desired != nil && *desired != *current { - rlog.Info("Provided IPFIX configuration differs current configuration. Updating") + rlog.Info("Provided Ipfix configuration differs current configuration. 
Updating") cm, err := c.flowsConfigMap(desired) if err != nil { return err @@ -101,11 +101,11 @@ func (c *FlowsConfigCNOController) current(ctx context.Context) (*flowsConfig, e func (c *FlowsConfigCNOController) desired( ctx context.Context, coll *flowslatest.FlowCollector) *flowsConfig { - corrected := coll.Spec.Agent.IPFIX.DeepCopy() + corrected := coll.Spec.Agent.Ipfix.DeepCopy() corrected.Sampling = getSampling(ctx, corrected) return &flowsConfig{ - FlowCollectorIPFIX: *corrected, + FlowCollectorIpfix: *corrected, NodePort: coll.Spec.Processor.Port, } } diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go index 926aed71c..ed4ffb42d 100644 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go @@ -51,15 +51,15 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows Name: c.config.DaemonSetName, Namespace: c.config.Namespace, }, ds); err != nil { - if kerr.IsNotFound(err) && !helper.UseIPFIX(&target.Spec) { - // If we don't want IPFIX and ovn-k daemonset is not found, assume there no ovn-k, just succeed + if kerr.IsNotFound(err) && !helper.UseIpfix(&target.Spec) { + // If we don't want Ipfix and ovn-k daemonset is not found, assume there no ovn-k, just succeed rlog.Info("Skip reconciling OVN: OVN DaemonSet not found") return nil } return fmt.Errorf("retrieving %s/%s daemonset: %w", c.config.Namespace, c.config.DaemonSetName, err) } - ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.IPFIX.OVNKubernetes.ContainerName) + ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.Ipfix.OVNKubernetes.ContainerName) if ovnkubeNode == nil { return errors.New("could not find container ovnkube-node") } @@ -71,7 +71,7 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } } if anyUpdate { - rlog.Info("Provided IPFIX configuration differs current configuration. Updating") + rlog.Info("Provided Ipfix configuration differs current configuration. 
Updating") return c.Update(ctx, ds) } @@ -80,21 +80,21 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } func (c *FlowsConfigOVNKController) desiredEnv(ctx context.Context, coll *flowslatest.FlowCollector) (map[string]string, error) { - cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.IPFIX.CacheActiveTimeout) + cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.Ipfix.CacheActiveTimeout) if err != nil { return nil, err } - sampling := getSampling(ctx, &coll.Spec.Agent.IPFIX) + sampling := getSampling(ctx, &coll.Spec.Agent.Ipfix) envs := map[string]string{ "OVN_IPFIX_TARGETS": "", "OVN_IPFIX_CACHE_ACTIVE_TIMEOUT": strconv.Itoa(int(cacheTimeout.Seconds())), - "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.IPFIX.CacheMaxFlows)), + "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.Ipfix.CacheMaxFlows)), "OVN_IPFIX_SAMPLING": strconv.Itoa(int(sampling)), } - if !helper.UseIPFIX(&coll.Spec) { - // No IPFIX => leave target empty and return + if !helper.UseIpfix(&coll.Spec) { + // No Ipfix => leave target empty and return return envs, nil } diff --git a/controllers/ovs/flowsconfig_types.go b/controllers/ovs/flowsconfig_types.go index b84a20957..846cbc5c6 100644 --- a/controllers/ovs/flowsconfig_types.go +++ b/controllers/ovs/flowsconfig_types.go @@ -12,7 +12,7 @@ import ( ) type flowsConfig struct { - flowslatest.FlowCollectorIPFIX `json:",inline" mapstructure:",squash"` + flowslatest.FlowCollectorIpfix `json:",inline" mapstructure:",squash"` SharedTarget string `json:"sharedTarget,omitempty" mapstructure:"sharedTarget,omitempty"` NodePort int32 `json:"nodePort,omitempty" mapstructure:"nodePort,omitempty"` } @@ -41,7 +41,7 @@ func (fc *flowsConfig) asStringMap() (map[string]string, error) { // getSampling returns the configured sampling, or 1 if ipfix.forceSampleAll is true // Note that configured sampling has a minimum value of 2. // See also https://bugzilla.redhat.com/show_bug.cgi?id=2103136 , https://bugzilla.redhat.com/show_bug.cgi?id=2104943 -func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIPFIX) int32 { +func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIpfix) int32 { rlog := log.FromContext(ctx) if cfg.ForceSampleAll { rlog.Info("Warning, sampling is set to 1. This may put cluster stability at risk.") diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 7c267402e..29d061a96 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -9044,10 +9044,10 @@ Defines the desired state of the FlowCollector resource.

deploymentModel enum - `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `DIRECT` (default) to make the flow processor listening directly from the agents.
- `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+ `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `Direct` (default) to make the flow processor listen directly to the agents.
- `Kafka` to send flows to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).

- Enum: DIRECT, KAFKA
- Default: DIRECT
+ Enum: Direct, Kafka
+ Default: Direct
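
As a quick reference for the renamed enum, a v1beta2 spec opting into the Kafka pipeline would look like the sketch below; the `flowslatest` alias matches the one used in the controllers, while the broker address and topic are placeholder assumptions:

    // import flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
    // Sketch only: the "Direct"/"Kafka" strings are the values guaranteed by
    // this change; the surrounding field names follow api/v1beta2.
    spec := flowslatest.FlowCollectorSpec{
        DeploymentModel: "Kafka", // was "KAFKA" before this series
        Kafka: flowslatest.FlowCollectorKafka{
            Address: "kafka-bootstrap.netobserv:9092", // assumed broker address
            Topic:   "network-flows",                  // assumed topic name
        },
    }
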
false @@ -9061,7 +9061,7 @@ Defines the desired state of the FlowCollector resource.

kafka object - Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+ Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`.
false @@ -9111,24 +9111,24 @@ Agent configuration for flows extraction. ebpf object - `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
+ `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.
false ipfix object - `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
+ `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `Ipfix`.
false type enum - `type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ `type` selects the flows tracing agent. Possible values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
- `Ipfix` [deprecated (*)] - to use the legacy IPFIX collector.
`Ebpf` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).

- Enum: EBPF, IPFIX
- Default: EBPF
+ Enum: Ebpf, Ipfix
+ Default: Ebpf
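
The helpers renamed in pkg/helper/flowcollector.go (see the hunk later in this patch) are how callers branch on these values; a minimal sketch of their use, with the returned strings being illustrative only:

    // import (
    //     flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
    //     "github.com/netobserv/network-observability-operator/pkg/helper"
    // )
    // Sketch: branching on the renamed agent-type constants.
    func agentMode(spec *flowslatest.FlowCollectorSpec) string {
        if helper.UseEbpf(spec) { // spec.Agent.Type == flowslatest.AgentEbpf
            return "deploy the eBPF agent DaemonSet"
        }
        if helper.UseIpfix(spec) { // spec.Agent.Type == flowslatest.AgentIpfix
            return "configure OVS/OVN IPFIX exports"
        }
        return "no agent configured"
    }
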
false @@ -9140,7 +9140,7 @@ Agent configuration for flows extraction. -`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`. +`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`. @@ -9363,7 +9363,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`. +`ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.
@@ -9405,7 +9405,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -9414,14 +9414,14 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
forceSampleAll boolean - `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.
+ `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.

Default: false
ovnKubernetes object - `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+ `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
false
sampling integer - `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.
+ `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix.

Format: int32
Default: 400
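
The sampling semantics described here are implemented by `getSampling` in controllers/ovs/flowsconfig_types.go (shown earlier in this patch); a standalone sketch of the effective value:

    // Sketch of the effective IPFIX sampling rate: forceSampleAll wins and
    // yields 1 (with a stability warning logged); otherwise the configured
    // value applies, which the CRD already floors at 2 (default 400).
    func effectiveSampling(forceSampleAll bool, configured int32) int32 {
        if forceSampleAll {
            return 1
        }
        return configured
    }
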
@@ -9466,7 +9466,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. +`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. @@ -9669,10 +9669,10 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -10710,16 +10710,16 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -10738,7 +10738,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. +Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to.
status enum - `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy an horizontal pod autoscaler.
- `ENABLED` deploys an horizontal pod autoscaler.

+ `status` describes the desired status regarding deploying a horizontal pod autoscaler.
- `Disabled` does not deploy a horizontal pod autoscaler.
- `Enabled` deploys a horizontal pod autoscaler.


- Enum: DISABLED, ENABLED
- Default: DISABLED
+ Enum: Disabled, Enabled
+ Default: Disabled
false
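
A reconciler can gate autoscaler creation on the renamed value; a sketch, with the field path taken from the table above and the comparison string being the only value this change guarantees:

    // import flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
    // Sketch: only deploy an HPA for the console plugin when requested.
    func wantConsoleHPA(spec *flowslatest.FlowCollectorSpec) bool {
        return spec.ConsolePlugin.Autoscaler.Status == "Enabled" // was "ENABLED"
    }
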
type enum - `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.
+ `type` selects the type of exporters. The available options are `Kafka` and `Ipfix`.

- Enum: KAFKA, IPFIX
+ Enum: Kafka, Ipfix
true
ipfix object - IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+ Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to.
false
@@ -10753,7 +10753,7 @@ IPFIX configuration, such as the IP address and port to send enriched IPFIX flow @@ -10762,14 +10762,14 @@ IPFIX configuration, such as the IP address and port to send enriched IPFIX flow @@ -10864,10 +10864,10 @@ SASL authentication configuration. [Unsupported (*)]. @@ -11153,7 +11153,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka -Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`. +Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`.
targetHost string - Address of the IPFIX external receiver
+ Address of the Ipfix external receiver

Default:
targetPort integer - Port for the IPFIX external receiver
+ Port for the Ipfix external receiver
true
transport enum - Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
+ Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`.

Enum: TCP, UDP
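
Putting the renamed exporter enum together with the receiver fields above, an additional IPFIX exporter might be declared as sketched below; the host and port are placeholders (4739 is simply the conventional IPFIX port), and the `IPFIX` field name is assumed from the `FlowCollectorIPFIXReceiver` type:

    // import flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
    // Sketch: an extra exporter sending enriched flows to an external
    // IPFIX receiver, using the new PascalCase type value.
    exporter := flowslatest.FlowCollectorExporter{
        Type: "Ipfix", // was "IPFIX"; "Kafka" is the other option
        IPFIX: flowslatest.FlowCollectorIPFIXReceiver{
            TargetHost: "ipfix-collector.example.com", // assumed receiver host
            TargetPort: 4739,                          // assumed receiver port
            Transport:  "TCP",
        },
    }
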
type enum - Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ Type of SASL authentication to use, or `Disabled` if SASL is not used

- Enum: DISABLED, PLAIN, SCRAM-SHA512
- Default: DISABLED
+ Enum: Disabled, Plain, ScramSHA512
+ Default: Disabled
false
@@ -11234,10 +11234,10 @@ SASL authentication configuration. [Unsupported (*)]. @@ -11708,10 +11708,10 @@ Loki configuration for "Manual" mode. This is the most flexible configuration. I @@ -12816,10 +12816,10 @@ TLS client configuration for Loki URL. @@ -13799,24 +13799,24 @@ TLS configuration. @@ -13828,7 +13828,7 @@ TLS configuration. -TLS configuration when `type` is set to `PROVIDED`. +TLS configuration when `type` is set to `Provided`.
type enum - Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ Type of SASL authentication to use, or `Disabled` if SASL is not used

- Enum: DISABLED, PLAIN, SCRAM-SHA512
- Default: DISABLED
+ Enum: Disabled, Plain, ScramSHA512
+ Default: Disabled
false
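
Note that `SCRAM-SHA512` becomes `ScramSHA512`, so this is not a mechanical case change: the hyphen disappears. A sketch of the resulting config:

    // import flowslatest "github.com/netobserv/network-observability-operator/api/v1beta2"
    // Sketch: SASL settings under the renamed enum.
    sasl := flowslatest.SASLConfig{
        Type: "ScramSHA512", // was "SCRAM-SHA512"; "Disabled" and "Plain" are also renamed
    }
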
authToken enum - `authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` does not send any token with the request.
- `FORWARD` forwards the user token for authorization.
- `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.
+ `authToken` describes the way to get a token to authenticate to Loki.
- `Disabled` does not send any token with the request.
- `Forward` forwards the user token for authorization.
- `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `Forward`.

- Enum: DISABLED, HOST, FORWARD
- Default: DISABLED
+ Enum: Disabled, Host, Forward
+ Default: Disabled
false
status enum - `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy an horizontal pod autoscaler.
- `ENABLED` deploys an horizontal pod autoscaler.

+ `status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `Disabled` does not deploy an horizontal pod autoscaler.
- `Enabled` deploys an horizontal pod autoscaler.


- Enum: DISABLED, ENABLED
- Default: DISABLED
+ Enum: Disabled, Enabled
+ Default: Disabled
false
provided object - TLS configuration when `type` is set to `PROVIDED`.
+ TLS configuration when `type` is set to `Provided`.
false
providedCaFile object - Reference to the CA file when `type` is set to `PROVIDED`.
+ Reference to the CA file when `type` is set to `Provided`.
false
type enum - Select the type of TLS configuration:
- `DISABLED` (default) to not configure TLS for the endpoint. - `PROVIDED` to manually provide cert file and a key file. - `AUTO` to use OpenShift auto generated certificate using annotations.
+ Select the type of TLS configuration:
- `Disabled` (default) to not configure TLS for the endpoint. - `Provided` to manually provide cert file and a key file. - `Auto` to use OpenShift auto generated certificate using annotations.

- Enum: DISABLED, PROVIDED, AUTO
- Default: DISABLED
+ Enum: Disabled, Provided, Auto
+ Default: Disabled
false
@@ -13887,7 +13887,7 @@ TLS configuration when `type` is set to `PROVIDED`. -Reference to the CA file when `type` is set to `PROVIDED`. +Reference to the CA file when `type` is set to `Provided`.
diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index 6407bf72b..aedf7b049 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -3535,7 +3535,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) + name: Sampling (Ebpf) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -3563,7 +3563,7 @@ spec: description: Agent configuration for flows extraction. properties: ebpf: - description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.' + description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.' properties: cacheActiveTimeout: default: 5s @@ -3685,7 +3685,7 @@ spec: type: integer type: object ipfix: - description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' + description: '`ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.' properties: cacheActiveTimeout: default: 20s @@ -3708,10 +3708,10 @@ spec: type: object forceSampleAll: default: false - description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' + description: '`forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' type: boolean ovnKubernetes: - description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' + description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: containerName: default: ovnkube-node @@ -3728,17 +3728,17 @@ spec: type: object sampling: default: 400 - description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.' + description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix.' format: int32 minimum: 2 type: integer type: object type: - default: EBPF - description: '`type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).' + default: Ebpf + description: '`type` selects the flows tracing agent. Possible values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
- `Ipfix` [deprecated (*)] - to use the legacy IPFIX collector.
`Ebpf` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).' enum: - - EBPF - - IPFIX + - Ebpf + - Ipfix type: string type: object consolePlugin: @@ -4083,11 +4083,11 @@ spec: format: int32 type: integer status: - default: DISABLED - description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy an horizontal pod autoscaler.
- `ENABLED` deploys an horizontal pod autoscaler.
' + default: Disabled + description: '`status` describes the desired status regarding deploying a horizontal pod autoscaler.
- `Disabled` does not deploy a horizontal pod autoscaler.
- `Enabled` deploys a horizontal pod autoscaler.
' enum: - - DISABLED - - ENABLED + - Disabled + - Enabled type: string type: object enable: @@ -4230,11 +4230,11 @@ spec: type: object type: object deploymentModel: - default: DIRECT - description: '`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `DIRECT` (default) to make the flow processor listening directly from the agents.
- `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' + default: Direct + description: '`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `Direct` (default) to make the flow processor listening directly from the agents.
- `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' enum: - - DIRECT - - KAFKA + - Direct + - Kafka type: string exporters: description: '`exporters` define additional optional exporters for custom consumption or storage.' @@ -4242,17 +4242,17 @@ spec: description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' properties: ipfix: - description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. + description: Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. properties: targetHost: default: "" - description: Address of the IPFIX external receiver + description: Address of the Ipfix external receiver type: string targetPort: - description: Port for the IPFIX external receiver + description: Port for the Ipfix external receiver type: integer transport: - description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. + description: Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`. enum: - TCP - UDP @@ -4312,12 +4312,12 @@ spec: type: string type: object type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` if SASL is not used + default: Disabled + description: Type of SASL authentication to use, or `Disabled` if SASL is not used enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 + - Disabled + - Plain + - ScramSHA512 type: string type: object tls: @@ -4387,17 +4387,17 @@ spec: - topic type: object type: - description: '`type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.' + description: '`type` selects the type of exporters. The available options are `Kafka` and `Ipfix`.' enum: - - KAFKA - - IPFIX + - Kafka + - Ipfix type: string required: - type type: object type: array kafka: - description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`. + description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`. properties: address: default: "" @@ -4447,12 +4447,12 @@ spec: type: string type: object type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` if SASL is not used + default: Disabled + description: Type of SASL authentication to use, or `Disabled` if SASL is not used enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 + - Disabled + - Plain + - ScramSHA512 type: string type: object tls: @@ -4553,12 +4553,12 @@ spec: description: Loki configuration for "Manual" mode. This is the most flexible configuration. It is ignored for other modes. properties: authToken: - default: DISABLED - description: '`authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` does not send any token with the request.
- `FORWARD` forwards the user token for authorization.
- `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.' + default: Disabled + description: '`authToken` describes the way to get a token to authenticate to Loki.
- `Disabled` does not send any token with the request.
- `Forward` forwards the user token for authorization.
- `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `Forward`.' enum: - - DISABLED - - HOST - - FORWARD + - Disabled + - Host + - Forward type: string ingesterUrl: default: http://loki:3100/ @@ -5265,11 +5265,11 @@ spec: format: int32 type: integer status: - default: DISABLED - description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy an horizontal pod autoscaler.
- `ENABLED` deploys an horizontal pod autoscaler.
' + default: Disabled + description: '`status` describes the desired status regarding deploying a horizontal pod autoscaler.
- `Disabled` does not deploy a horizontal pod autoscaler.
- `Enabled` deploys a horizontal pod autoscaler.
' enum: - - DISABLED - - ENABLED + - Disabled + - Enabled type: string type: object kafkaConsumerBatchSize: @@ -5342,7 +5342,7 @@ spec: description: '`insecureSkipVerify` allows skipping client-side verification of the provided certificate. If set to `true`, the `providedCaFile` field is ignored.' type: boolean provided: - description: TLS configuration when `type` is set to `PROVIDED`. + description: TLS configuration when `type` is set to `Provided`. properties: certFile: description: '`certFile` defines the path to the certificate file name within the config map or secret' @@ -5365,7 +5365,7 @@ spec: type: string type: object providedCaFile: - description: Reference to the CA file when `type` is set to `PROVIDED`. + description: Reference to the CA file when `type` is set to `Provided`. properties: file: description: File name within the config map or secret @@ -5385,12 +5385,12 @@ spec: type: string type: object type: - default: DISABLED - description: Select the type of TLS configuration:
- `DISABLED` (default) to not configure TLS for the endpoint. - `PROVIDED` to manually provide cert file and a key file. - `AUTO` to use OpenShift auto generated certificate using annotations. + default: Disabled + description: Select the type of TLS configuration:
- `Disabled` (default) to not configure TLS for the endpoint. - `Provided` to manually provide cert file and a key file. - `Auto` to use OpenShift auto generated certificate using annotations. enum: - - DISABLED - - PROVIDED - - AUTO + - Disabled + - Provided + - Auto type: string type: object type: object diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index 0ddf85561..2e736675d 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -10,18 +10,18 @@ import ( ) func GetSampling(spec *flowslatest.FlowCollectorSpec) int { - if UseEBPF(spec) { - return int(*spec.Agent.EBPF.Sampling) + if UseEbpf(spec) { + return int(*spec.Agent.Ebpf.Sampling) } - return int(spec.Agent.IPFIX.Sampling) + return int(spec.Agent.Ipfix.Sampling) } -func UseEBPF(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentEBPF +func UseEbpf(spec *flowslatest.FlowCollectorSpec) bool { + return spec.Agent.Type == flowslatest.AgentEbpf } -func UseIPFIX(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentIPFIX +func UseIpfix(spec *flowslatest.FlowCollectorSpec) bool { + return spec.Agent.Type == flowslatest.AgentIpfix } func UseKafka(spec *flowslatest.FlowCollectorSpec) bool { @@ -90,7 +90,7 @@ func UseConsolePlugin(spec *flowslatest.FlowCollectorSpec) bool { (spec.ConsolePlugin.Enable == nil || *spec.ConsolePlugin.Enable) } -func IsFeatureEnabled(spec *flowslatest.FlowCollectorEBPF, feature flowslatest.AgentFeature) bool { +func IsFeatureEnabled(spec *flowslatest.FlowCollectorEbpf, feature flowslatest.AgentFeature) bool { for _, f := range spec.Features { if f == feature { return true @@ -99,22 +99,22 @@ func IsFeatureEnabled(spec *flowslatest.FlowCollectorEBPF, feature flowslatest.A return false } -func IsPrivileged(spec *flowslatest.FlowCollectorEBPF) bool { +func IsPrivileged(spec *flowslatest.FlowCollectorEbpf) bool { return spec.Privileged } -func IsPktDropEnabled(spec *flowslatest.FlowCollectorEBPF) bool { +func IsPktDropEnabled(spec *flowslatest.FlowCollectorEbpf) bool { if IsPrivileged(spec) && IsFeatureEnabled(spec, flowslatest.PacketDrop) { return true } return false } -func IsDNSTrackingEnabled(spec *flowslatest.FlowCollectorEBPF) bool { +func IsDNSTrackingEnabled(spec *flowslatest.FlowCollectorEbpf) bool { return IsFeatureEnabled(spec, flowslatest.DNSTracking) } -func IsFlowRTTEnabled(spec *flowslatest.FlowCollectorEBPF) bool { +func IsFlowRTTEnabled(spec *flowslatest.FlowCollectorEbpf) bool { return IsFeatureEnabled(spec, flowslatest.FlowRTT) } From 7b65b1ad626859d2309f3926e21c34ef70a7e416 Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Fri, 3 Nov 2023 17:18:25 -0400 Subject: [PATCH 03/16] Revert URL changes --- .../netobserv-operator.clusterserviceversion.yaml | 8 ++++---- config/descriptions/ocp.md | 4 ++-- config/descriptions/upstream.md | 4 ++-- config/manager/kustomization.yaml | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index a691119c5..241d6d082 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -526,7 +526,7 @@ metadata: capabilities: Seamless Upgrades categories: Monitoring console.openshift.io/plugins: '["netobserv-plugin"]' - containerImage: quay.io/amoghrd/network-observability-operator:main + containerImage: 
quay.io/netobserv/network-observability-operator:1.0.4 createdAt: ':created-at:' description: Network flows collector and monitoring solution operatorframework.io/suggested-namespace: openshift-netobserv-operator @@ -598,7 +598,7 @@ spec: ## Configuration - The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). + The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -616,7 +616,7 @@ spec: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. - - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). + - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -886,7 +886,7 @@ spec: - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS - image: quay.io/amoghrd/network-observability-operator:main + image: quay.io/netobserv/network-observability-operator:1.0.4 imagePullPolicy: Always livenessProbe: httpGet: diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md index e0e86d923..6e2e48cbc 100644 --- a/config/descriptions/ocp.md +++ b/config/descriptions/ocp.md @@ -28,7 +28,7 @@ oc apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents/252b ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. 
A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -44,7 +44,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md index 30e670499..04c610fcb 100644 --- a/config/descriptions/upstream.md +++ b/config/descriptions/upstream.md @@ -32,7 +32,7 @@ kubectl apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). 
To edit configuration in cluster, run: @@ -50,7 +50,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index fb1512921..f69dc045d 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -14,7 +14,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/amoghrd/network-observability-operator - newTag: main + newName: quay.io/netobserv/network-observability-operator + newTag: 1.0.4 commonLabels: app: netobserv-operator From 7409f539d181287dad9477a073b1dba751ef421f Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Mon, 6 Nov 2023 14:54:49 +0100 Subject: [PATCH 04/16] manage conversions --- api/v1alpha1/flowcollector_types.go | 1 + api/v1alpha1/flowcollector_webhook.go | 163 +++++++++- api/v1alpha1/zz_generated.conversion.go | 283 ++++++++++------ api/v1beta1/flowcollector_types.go | 1 + api/v1beta1/flowcollector_webhook.go | 193 ++++++++++- api/v1beta1/flowcollector_webhook_test.go | 2 +- api/v1beta1/zz_generated.conversion.go | 307 ++++++++++++------ api/v1beta2/flowcollector_types.go | 129 ++++---- api/v1beta2/zz_generated.deepcopy.go | 30 +- .../flows.netobserv.io_flowcollectors.yaml | 46 +-- ...observ-operator.clusterserviceversion.yaml | 2 +- .../flows.netobserv.io_flowcollectors.yaml | 46 +-- .../samples/flows_v1beta2_flowcollector.yaml | 4 +- .../consoleplugin/consoleplugin_objects.go | 8 +- controllers/ebpf/agent_controller.go | 58 ++-- .../ebpf/internal/permissions/permissions.go | 6 +- controllers/flowcollector_controller.go | 6 +- ...wcollector_controller_certificates_test.go | 4 +- .../flowcollector_controller_ebpf_test.go | 12 +- .../flowcollector_controller_iso_test.go | 6 +- controllers/flowcollector_controller_test.go | 14 +- .../flowlogspipeline/flp_common_objects.go | 24 +- 
.../flowlogspipeline/flp_ingest_objects.go | 2 +- .../flowlogspipeline/flp_ingest_reconciler.go | 2 +- .../flowlogspipeline/flp_monolith_objects.go | 2 +- controllers/flowlogspipeline/flp_test.go | 4 +- .../flowlogspipeline/flp_transfo_objects.go | 2 +- controllers/ovs/flowsconfig_cno_reconciler.go | 10 +- .../ovs/flowsconfig_ovnk_reconciler.go | 18 +- controllers/ovs/flowsconfig_types.go | 4 +- docs/FlowCollector.md | 36 +- ...ned.flows.netobserv.io_flowcollectors.yaml | 32 +- pkg/conversion/conversion.go | 57 ++++ pkg/conversion/conversion_test.go | 39 +++ pkg/helper/flowcollector.go | 24 +- 35 files changed, 1094 insertions(+), 483 deletions(-) create mode 100644 pkg/conversion/conversion_test.go diff --git a/api/v1alpha1/flowcollector_types.go b/api/v1alpha1/flowcollector_types.go index 9d74ed366..8c99b854c 100644 --- a/api/v1alpha1/flowcollector_types.go +++ b/api/v1alpha1/flowcollector_types.go @@ -76,6 +76,7 @@ type FlowCollectorSpec struct { // exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available. // +optional + // +k8s:conversion-gen=false Exporters []*FlowCollectorExporter `json:"exporters"` } diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index 01183b758..a82f1de33 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -46,9 +46,9 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { } // Agent - if restored.Spec.Agent.Ebpf.Features != nil { - dst.Spec.Agent.Ebpf.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.Ebpf.Features)) - copy(dst.Spec.Agent.Ebpf.Features, restored.Spec.Agent.Ebpf.Features) + if restored.Spec.Agent.EBPF.Features != nil { + dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) + copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) } // Processor @@ -130,7 +130,7 @@ func Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2 out.QuerierURL = manual.QuerierURL out.StatusURL = manual.StatusURL out.TenantID = manual.TenantID - out.AuthToken = manual.AuthToken + out.AuthToken = utilconversion.PascalToUpper(string(manual.AuthToken), '_') if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&manual.TLS, &out.TLS, nil); err != nil { return fmt.Errorf("copying v1beta2.Loki.TLS into v1alpha1.Loki.TLS: %w", err) } @@ -147,7 +147,7 @@ func Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCol QuerierURL: in.QuerierURL, StatusURL: in.StatusURL, TenantID: in.TenantID, - AuthToken: in.AuthToken, + AuthToken: v1beta2.LokiAuthToken(utilconversion.UpperToPascal(in.AuthToken)), } // fallback on ingester url if querier is not set if len(out.Manual.QuerierURL) == 0 { @@ -169,15 +169,156 @@ func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsole // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta1 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorEbpf_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEbpf, out *FlowCollectorEBPF, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorEbpf_To_v1alpha1_FlowCollectorEBPF(in, out, s) +func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s 
apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) } -// // This function need to be manually created because conversion-gen not able to create it intentionally because -// // we have new defined fields in v1beta2 not in v1alpha1 -// // nolint:golint,stylecheck,revive +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s); err != nil { + return err + } + out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(utilconversion.UpperToPascal(in.DeploymentModel)) + out.Exporters = []*v1beta2.FlowCollectorExporter{} + for _, inExporter := range in.Exporters { + outExporter := &v1beta2.FlowCollectorExporter{} + if err := Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(inExporter, outExporter, s); err != nil { + return err + } + out.Exporters = append(out.Exporters, outExporter) + } + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s apiconversion.Scope) error { + if err := autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s); err != nil { + return err + } + out.DeploymentModel = utilconversion.PascalToUpper(string(in.DeploymentModel), '_') + out.Exporters = []*FlowCollectorExporter{} + for _, inExporter := range in.Exporters { + outExporter := &FlowCollectorExporter{} + if err := Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(inExporter, outExporter, s); err != nil { + return err + } + out.Exporters = append(out.Exporters, outExporter) + } + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s); err != nil { + return err + } + out.Type = v1beta2.FlowCollectorAgentType(utilconversion.UpperToPascal(in.Type)) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s apiconversion.Scope) error { + if err := autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s); err != nil { + return err + } + out.Type = utilconversion.PascalToUpper(string(in.Type), '_') + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because 
+// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in, out, s); err != nil { + return err + } + out.Type = v1beta2.ServerTLSConfigType(utilconversion.UpperToPascal(string(in.Type))) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error { - return autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s) + if err := autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s); err != nil { + return err + } + out.Type = ServerTLSConfigType(utilconversion.PascalToUpper(string(in.Type), '_')) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s); err != nil { + return err + } + out.Status = v1beta2.HPAStatus(utilconversion.UpperToPascal(in.Status)) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s apiconversion.Scope) error { + if err := autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s); err != nil { + return err + } + out.Status = utilconversion.PascalToUpper(string(in.Status), '_') + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in, out, s); err != nil { + return err + } + out.Type = v1beta2.SASLType(utilconversion.UpperToPascal(string(in.Type))) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s apiconversion.Scope) error { + if err := autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in, out, s); err != nil { + return err + } + out.Type = SASLType(utilconversion.PascalToUpper(string(in.Type), '_')) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// 
nolint:golint,stylecheck,revive +func Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s); err != nil { + return err + } + out.Type = v1beta2.ExporterType(utilconversion.UpperToPascal(string(in.Type))) + return nil +} + +// This function need to be manually created because conversion-gen not able to create it intentionally because +// we have camel case enum in v1beta2 which were uppercase in v1alpha1 +// nolint:golint,stylecheck,revive +func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { + if err := autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s); err != nil { + return err + } + out.Type = ExporterType(utilconversion.PascalToUpper(string(in.Type), '_')) + return nil } // This function need to be manually created because conversion-gen not able to create it intentionally because diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index d91507131..d9afc6552 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -108,43 +108,38 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) + if err := 
s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) }); err != nil { return err } @@ -168,16 +163,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) }); err != nil { @@ -218,23 +203,23 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) + if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(a.(*v1beta2.SASLConfig), b.(*SASLConfig), scope) + if err := s.AddConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) + if err := s.AddConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) + if err := s.AddConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) }); err != nil { return err } @@ -243,26 +228,71 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) + }); err != nil { + return err + } + if err := 
s.AddConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(a.(*v1beta2.SASLConfig), b.(*SASLConfig), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { 
return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) }); err != nil { @@ -475,16 +505,24 @@ func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCol } func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - out.Type = in.Type - // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type - // WARNING: in.EBPF requires manual conversion: does not exist in peer-type + out.Type = v1beta2.FlowCollectorAgentType(in.Type) + if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } return nil } func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - out.Type = in.Type - // WARNING: in.Ipfix requires manual conversion: does not exist in peer-type - // WARNING: in.Ebpf requires manual conversion: does not exist in peer-type + out.Type = string(in.Type) + if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } return nil } @@ -536,38 +574,68 @@ func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorCon return nil } +func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + // WARNING: in.Features requires manual conversion: does not exist in peer-type + return nil +} + func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { out.Type = v1beta2.ExporterType(in.Type) if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil } -// Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { out.Type = ExporterType(in.Type) if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta2_FlowCollectorIpfixReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil } -// Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s) -} - func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort @@ -639,29 +707,81 @@ func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1bet } func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { - out.Status = in.Status + out.Status = v1beta2.HPAStatus(in.Status) out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) return nil } -// Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - out.Status = in.Status + out.Status = string(in.Status) out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) return nil } -// Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) +func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. 
+func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. +func Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in, out, s) } func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { @@ -808,19 +928,14 @@ func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *Flo if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } - out.DeploymentModel = in.DeploymentModel + out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(in.DeploymentModel) if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - out.Exporters = *(*[]*v1beta2.FlowCollectorExporter)(unsafe.Pointer(&in.Exporters)) + // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { out.Namespace = in.Namespace if err := Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { @@ -835,19 +950,14 @@ func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1b if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } - out.DeploymentModel = in.DeploymentModel + out.DeploymentModel = string(in.DeploymentModel) if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - out.Exporters = *(*[]*FlowCollectorExporter)(unsafe.Pointer(&in.Exporters)) + // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s) -} - func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Namespace = in.Namespace @@ -955,11 +1065,6 @@ func autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out * return nil } -// Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig is an autogenerated conversion function. 
-func Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in, out, s) -} - func autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { out.Type = SASLType(in.Type) if err := Convert_v1beta2_FileReference_To_v1alpha1_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { @@ -971,22 +1076,12 @@ func autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfi return nil } -// Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig is an autogenerated conversion function. -func Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { - return autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in, out, s) -} - func autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { out.Type = v1beta2.ServerTLSConfigType(in.Type) out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) return nil } -// Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS is an autogenerated conversion function. -func Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { - return autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in, out, s) -} - func autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { out.Type = ServerTLSConfigType(in.Type) out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index 18c48d90f..68d6d311b 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -78,6 +78,7 @@ type FlowCollectorSpec struct { // `exporters` define additional optional exporters for custom consumption or storage. // +optional + // +k8s:conversion-gen=false Exporters []*FlowCollectorExporter `json:"exporters"` } diff --git a/api/v1beta1/flowcollector_webhook.go b/api/v1beta1/flowcollector_webhook.go index 00885512c..98b071254 100644 --- a/api/v1beta1/flowcollector_webhook.go +++ b/api/v1beta1/flowcollector_webhook.go @@ -76,8 +76,8 @@ func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error { // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta2 not in v1beta1 // nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s) +func Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { + return autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s) } // This function need to be manually created because conversion-gen not able to create it intentionally because @@ -89,7 +89,7 @@ func Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(in *v1beta2. 
 	out.QuerierURL = manual.QuerierURL
 	out.StatusURL = manual.StatusURL
 	out.TenantID = manual.TenantID
-	out.AuthToken = manual.AuthToken
+	out.AuthToken = utilconversion.PascalToUpper(string(manual.AuthToken), '_')
 	if err := Convert_v1beta2_ClientTLS_To_v1beta1_ClientTLS(&manual.TLS, &out.TLS, nil); err != nil {
 		return fmt.Errorf("copying Loki v1beta2 TLS into v1beta1 TLS: %w", err)
 	}
@@ -109,7 +109,7 @@ func Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowColl
 		QuerierURL: in.QuerierURL,
 		StatusURL:  in.StatusURL,
 		TenantID:   in.TenantID,
-		AuthToken:  in.AuthToken,
+		AuthToken:  v1beta2.LokiAuthToken(utilconversion.UpperToPascal(in.AuthToken)),
 	}
 	// fallback on ingester url if querier is not set
 	if len(out.Manual.QuerierURL) == 0 {
@@ -124,13 +124,6 @@ func Convert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowColl
 	return autoConvert_v1beta1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s)
 }
 
-// This function need to be manually created because conversion-gen not able to create it intentionally because
-// we have new defined fields in v1beta2 not in v1beta1
-// nolint:golint,stylecheck,revive
-func Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error {
-	return autoConvert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(in, out, s)
-}
-
 // This function need to be manually created because conversion-gen not able to create it intentionally because
 // we have new defined fields in v1beta2 not in v1beta1
 // nolint:golint,stylecheck,revive
@@ -143,3 +136,181 @@ func Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1bet
 	out.IncludeList = &includeList
 	return nil
 }
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s); err != nil {
+		return err
+	}
+	out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(utilconversion.UpperToPascal(in.DeploymentModel))
+	out.Exporters = []*v1beta2.FlowCollectorExporter{}
+	for _, inExporter := range in.Exporters {
+		outExporter := &v1beta2.FlowCollectorExporter{}
+		if err := Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(inExporter, outExporter, s); err != nil {
+			return err
+		}
+		out.Exporters = append(out.Exporters, outExporter)
+	}
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in, out, s); err != nil {
+		return err
+	}
+	out.DeploymentModel = utilconversion.PascalToUpper(string(in.DeploymentModel), '_')
+	out.Exporters = []*FlowCollectorExporter{}
+	for _, inExporter := range in.Exporters {
+		outExporter := &FlowCollectorExporter{}
+		if err := Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(inExporter, outExporter, s); err != nil {
+			return err
+		}
+		out.Exporters = append(out.Exporters, outExporter)
+	}
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s); err != nil {
+		return err
+	}
+	out.Type = v1beta2.FlowCollectorAgentType(utilconversion.UpperToPascal(in.Type))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in, out, s); err != nil {
+		return err
+	}
+	out.Type = utilconversion.PascalToUpper(string(in.Type), '_')
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1,
+// and v1beta2 defines new fields that do not exist in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s); err != nil {
+		return err
+	}
+	if in.LogTypes != nil {
+		logTypes := v1beta2.FLPLogTypes(utilconversion.UpperToPascal(*in.LogTypes))
+		out.LogTypes = &logTypes
+	}
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1,
+// and v1beta2 defines new fields that do not exist in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in, out, s); err != nil {
+		return err
+	}
+	if in.LogTypes != nil {
+		str := utilconversion.PascalToUpper(string(*in.LogTypes), '_')
+		out.LogTypes = &str
+	}
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in, out, s); err != nil {
+		return err
+	}
+	out.Type = v1beta2.ServerTLSConfigType(utilconversion.UpperToPascal(string(in.Type)))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in, out, s); err != nil {
+		return err
+	}
+	out.Type = ServerTLSConfigType(utilconversion.PascalToUpper(string(in.Type), '_'))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s); err != nil {
+		return err
+	}
+	out.Status = v1beta2.HPAStatus(utilconversion.UpperToPascal(in.Status))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s); err != nil {
+		return err
+	}
+	out.Status = utilconversion.PascalToUpper(string(in.Status), '_')
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in, out, s); err != nil {
+		return err
+	}
+	out.Type = v1beta2.SASLType(utilconversion.UpperToPascal(string(in.Type)))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in, out, s); err != nil {
+		return err
+	}
+	out.Type = SASLType(utilconversion.PascalToUpper(string(in.Type), '-'))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s apiconversion.Scope) error {
+	if err := autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s); err != nil {
+		return err
+	}
+	out.Type = v1beta2.ExporterType(utilconversion.UpperToPascal(string(in.Type)))
+	return nil
+}
+
+// This function needs to be created manually because conversion-gen intentionally does not generate it:
+// the enum values are camel case in v1beta2, whereas they were uppercase in v1beta1
+// nolint:golint,stylecheck,revive
+func Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error {
+	if err := 
autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in, out, s); err != nil { + return err + } + out.Type = ExporterType(utilconversion.PascalToUpper(string(in.Type), '_')) + return nil +} diff --git a/api/v1beta1/flowcollector_webhook_test.go b/api/v1beta1/flowcollector_webhook_test.go index 1617dae0a..f48b7bdc8 100644 --- a/api/v1beta1/flowcollector_webhook_test.go +++ b/api/v1beta1/flowcollector_webhook_test.go @@ -44,7 +44,7 @@ func TestBeta1ConversionRoundtrip_Loki(t *testing.T) { assert.Equal("http://loki/status", converted.Spec.Loki.Manual.StatusURL) assert.Equal("http://loki/querier", converted.Spec.Loki.Manual.QuerierURL) assert.Equal("tenant", converted.Spec.Loki.Manual.TenantID) - assert.Equal(LokiAuthForwardUserToken, converted.Spec.Loki.Manual.AuthToken) + assert.Equal(v1beta2.LokiAuthForwardUserToken, converted.Spec.Loki.Manual.AuthToken) assert.True(converted.Spec.Loki.Manual.TLS.Enable) assert.True(converted.Spec.Loki.Manual.TLS.InsecureSkipVerify) assert.True(converted.Spec.Loki.Manual.StatusTLS.Enable) diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 5e3e48dd0..27298ef24 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -108,16 +108,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) }); err != nil { @@ -128,28 +118,33 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), 
(*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) }); err != nil { return err } @@ -173,16 +168,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), 
b.(*v1beta2.FlowCollectorStatus), scope) }); err != nil { @@ -223,28 +208,28 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) + if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(a.(*v1beta2.SASLConfig), b.(*SASLConfig), scope) + if err := s.AddConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) + if err := s.AddConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) + if err := s.AddConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) + if err := s.AddConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) }); err != nil { return err } @@ -253,21 +238,66 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FLPMetrics_To_v1beta1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorLoki_To_v1beta1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(a.(*v1beta2.SASLConfig), b.(*SASLConfig), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) + }); err != nil { + return err + } return nil } @@ -473,16 +503,24 @@ func Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowColl } func autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - out.Type = in.Type - // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type - // WARNING: in.EBPF requires 
manual conversion: does not exist in peer-type + out.Type = v1beta2.FlowCollectorAgentType(in.Type) + if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } return nil } func autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - out.Type = in.Type - // WARNING: in.Ipfix requires manual conversion: does not exist in peer-type - // WARNING: in.Ebpf requires manual conversion: does not exist in peer-type + out.Type = string(in.Type) + if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { + return err + } + if err := Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { + return err + } return nil } @@ -532,38 +570,74 @@ func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsoleP return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) } +func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + out.Features = *(*[]v1beta2.AgentFeature)(unsafe.Pointer(&in.Features)) + return nil +} + +// Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + out.ImagePullPolicy = in.ImagePullPolicy + out.Resources = in.Resources + out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) + out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) + out.LogLevel = in.LogLevel + out.Privileged = in.Privileged + out.KafkaBatchSize = in.KafkaBatchSize + if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { + return err + } + out.Features = *(*[]AgentFeature)(unsafe.Pointer(&in.Features)) + return nil +} + +// Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) +} + func autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { out.Type = v1beta2.ExporterType(in.Type) if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil } -// Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { out.Type = ExporterType(in.Type) if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta2_FlowCollectorIpfixReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil } -// Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(in, out, s) -} - func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort @@ -582,7 +656,7 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCo } out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - out.LogTypes = (*string)(unsafe.Pointer(in.LogTypes)) + out.LogTypes = (*v1beta2.FLPLogTypes)(unsafe.Pointer(in.LogTypes)) out.ConversationHeartbeatInterval = (*v1.Duration)(unsafe.Pointer(in.ConversationHeartbeatInterval)) out.ConversationEndTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationEndTimeout)) out.ConversationTerminatingTimeout = (*v1.Duration)(unsafe.Pointer(in.ConversationTerminatingTimeout)) @@ -593,11 +667,6 @@ func autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCo return nil } -// Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP is an autogenerated conversion function. 
-func Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { out.Port = in.Port out.HealthPort = in.HealthPort @@ -628,29 +697,81 @@ func autoConvert_v1beta2_FlowCollectorFLP_To_v1beta1_FlowCollectorFLP(in *v1beta } func autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { - out.Status = in.Status + out.Status = v1beta2.HPAStatus(in.Status) out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) return nil } -// Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - out.Status = in.Status + out.Status = string(in.Status) out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) out.MaxReplicas = in.MaxReplicas out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) return nil } -// Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) +func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. 
+func Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + out.CacheActiveTimeout = in.CacheActiveTimeout + out.CacheMaxFlows = in.CacheMaxFlows + out.Sampling = in.Sampling + out.ForceSampleAll = in.ForceSampleAll + if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { + return err + } + if err := Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) +} + +func autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. +func Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) +} + +func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + out.TargetHost = in.TargetHost + out.TargetPort = in.TargetPort + out.Transport = in.Transport + return nil +} + +// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
+func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in, out, s) } func autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { @@ -779,19 +900,14 @@ func autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *Flow if err := Convert_v1beta1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } - out.DeploymentModel = in.DeploymentModel + out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(in.DeploymentModel) if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - out.Exporters = *(*[]*v1beta2.FlowCollectorExporter)(unsafe.Pointer(&in.Exporters)) + // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s) -} - func autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { out.Namespace = in.Namespace if err := Convert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { @@ -806,19 +922,14 @@ func autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1be if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { return err } - out.DeploymentModel = in.DeploymentModel + out.DeploymentModel = string(in.DeploymentModel) if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - out.Exporters = *(*[]*FlowCollectorExporter)(unsafe.Pointer(&in.Exporters)) + // INFO: in.Exporters opted out of conversion generation return nil } -// Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorSpec_To_v1beta1_FlowCollectorSpec(in, out, s) -} - func autoConvert_v1beta1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Namespace = in.Namespace @@ -926,11 +1037,6 @@ func autoConvert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v return nil } -// Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig is an autogenerated conversion function. 
-func Convert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { - return autoConvert_v1beta1_SASLConfig_To_v1beta2_SASLConfig(in, out, s) -} - func autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { out.Type = SASLType(in.Type) if err := Convert_v1beta2_FileReference_To_v1beta1_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { @@ -942,11 +1048,6 @@ func autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig return nil } -// Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig is an autogenerated conversion function. -func Convert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { - return autoConvert_v1beta2_SASLConfig_To_v1beta1_SASLConfig(in, out, s) -} - func autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { out.Type = v1beta2.ServerTLSConfigType(in.Type) out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) @@ -955,11 +1056,6 @@ func autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1be return nil } -// Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS is an autogenerated conversion function. -func Convert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { - return autoConvert_v1beta1_ServerTLS_To_v1beta2_ServerTLS(in, out, s) -} - func autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { out.Type = ServerTLSConfigType(in.Type) out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) @@ -967,8 +1063,3 @@ func autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, o out.ProvidedCaFile = (*FileReference)(unsafe.Pointer(in.ProvidedCaFile)) return nil } - -// Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS is an autogenerated conversion function. -func Convert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { - return autoConvert_v1beta2_ServerTLS_To_v1beta1_ServerTLS(in, out, s) -} diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 98fad9620..766279456 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -23,11 +23,11 @@ import ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type FlowCollectorDeploymentModel string + const ( - AgentIpfix = "Ipfix" - AgentEbpf = "Ebpf" - DeploymentModelDirect = "Direct" - DeploymentModelKafka = "Kafka" + DeploymentModelDirect FlowCollectorDeploymentModel = "Direct" + DeploymentModelKafka FlowCollectorDeploymentModel = "Kafka" ) // Please notice that the FlowCollectorSpec's properties MUST redefine one of the default @@ -70,7 +70,7 @@ type FlowCollectorSpec struct { // +unionDiscriminator // +kubebuilder:validation:Enum:="Direct";"Kafka" // +kubebuilder:default:=Direct - DeploymentModel string `json:"deploymentModel,omitempty"` + DeploymentModel FlowCollectorDeploymentModel `json:"deploymentModel,omitempty"` // Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`. 
// +optional @@ -78,38 +78,46 @@ type FlowCollectorSpec struct { // `exporters` define additional optional exporters for custom consumption or storage. // +optional + // +k8s:conversion-gen=false Exporters []*FlowCollectorExporter `json:"exporters"` } +type FlowCollectorAgentType string + +const ( + AgentIPFIX FlowCollectorAgentType = "IPFIX" + AgentEBPF FlowCollectorAgentType = "eBPF" +) + // `FlowCollectorAgent` is a discriminated union that allows to select either ipfix or ebpf, but does not // allow defining both fields. // +union type FlowCollectorAgent struct { // `type` selects the flows tracing agent. Possible values are:
- // - `Ebpf` (default) to use NetObserv eBPF agent.
- // - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
- // `Ebpf` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. - // `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, + // - `eBPF` (default) to use NetObserv eBPF agent.
+ // - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ // `eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. + // `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, // but they would require manual configuration). // +unionDiscriminator - // +kubebuilder:validation:Enum:="Ebpf";"Ipfix" - // +kubebuilder:default:=Ebpf - Type string `json:"type,omitempty"` + // +kubebuilder:validation:Enum:="eBPF";"IPFIX" + // +kubebuilder:default:=eBPF + Type FlowCollectorAgentType `json:"type,omitempty"` - // `ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` - // is set to `Ipfix`. + // `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` + // is set to `IPFIX`. // +optional - Ipfix FlowCollectorIpfix `json:"ipfix,omitempty"` + IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` // `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` - // is set to `Ebpf`. + // is set to `EBPF`. // +optional - Ebpf FlowCollectorEbpf `json:"ebpf,omitempty"` + EBPF FlowCollectorEBPF `json:"ebpf,omitempty"` } -// `FlowCollectorIpfix` defines a FlowCollector that uses Ipfix on OVN-Kubernetes to collect the +// `FlowCollectorIPFIX` defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the // flows information -type FlowCollectorIpfix struct { +type FlowCollectorIPFIX struct { // Important: Run "make generate" to regenerate code after modifying this file //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ @@ -127,12 +135,12 @@ type FlowCollectorIpfix struct { // `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. // To ensure cluster stability, it is not possible to set a value below 2. // If you really want to sample every packet, which might impact the cluster stability, - // refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix. + // refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX. Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"` //+kubebuilder:default:=false - // `forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. - // It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. + // `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. + // It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. // If you REALLY want to do that, set this flag to `true`. Use at your own risk. // When it is set to `true`, the value of `sampling` is ignored. ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"` @@ -140,7 +148,7 @@ type FlowCollectorIpfix struct { // `clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available. ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"` - // `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. + // `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. 
When using OpenShift, refer to the `clusterNetworkOperator` property instead. OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"` } @@ -157,8 +165,8 @@ const ( FlowRTT AgentFeature = "FlowRTT" ) -// `FlowCollectorEbpf` defines a FlowCollector that uses eBPF to collect the flows information -type FlowCollectorEbpf struct { +// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information +type FlowCollectorEBPF struct { // Important: Run "make generate" to regenerate code after modifying this file //+kubebuilder:validation:Enum=IfNotPresent;Always;Never @@ -263,29 +271,29 @@ type FlowCollectorKafka struct { SASL SASLConfig `json:"sasl"` } -type FlowCollectorIpfixReceiver struct { +type FlowCollectorIPFIXReceiver struct { //+kubebuilder:default:="" - // Address of the Ipfix external receiver + // Address of the IPFIX external receiver TargetHost string `json:"targetHost"` - // Port for the Ipfix external receiver + // Port for the IPFIX external receiver TargetPort int `json:"targetPort"` - // Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`. + // Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. // +unionDiscriminator // +kubebuilder:validation:Enum:="TCP";"UDP" // +optional Transport string `json:"transport,omitempty"` } +type ServerTLSConfigType string + const ( - ServerTLSDisabled = "Disabled" - ServerTLSProvided = "Provided" - ServerTLSAuto = "Auto" + ServerTLSDisabled ServerTLSConfigType = "Disabled" + ServerTLSProvided ServerTLSConfigType = "Provided" + ServerTLSAuto ServerTLSConfigType = "Auto" ) -type ServerTLSConfigType string - // `ServerTLS` define the TLS configuration, server side type ServerTLS struct { // Select the type of TLS configuration:
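The explicit casts that now appear in the conversion functions above (`out.Status = v1beta2.HPAStatus(in.Status)` in one direction, `out.Status = string(in.Status)` in the other) follow from v1beta2 replacing plain `string` fields with named string types such as `HPAStatus` and `ServerTLSConfigType`. A minimal standalone sketch of that pattern, using hypothetical `V1`/`V2` names rather than the operator's real types:

package main

import "fmt"

// HPAStatusV2 stands in for a v1beta2 named string type; the v1beta1 side
// keeps a plain string, as in the API above.
type HPAStatusV2 string

const (
	HPAStatusDisabledV2 HPAStatusV2 = "Disabled"
	HPAStatusEnabledV2  HPAStatusV2 = "Enabled"
)

// Each direction needs an explicit conversion, which is exactly what
// conversion-gen emits once the field types no longer match.
func toV2(in string) HPAStatusV2 { return HPAStatusV2(in) }
func toV1(in HPAStatusV2) string { return string(in) }

func main() {
	fmt.Println(toV2("Enabled"), toV1(HPAStatusEnabledV2)) // Enabled Enabled
}

Note that a cast copies the string value unchanged, so it is only sufficient for values whose spelling is identical in both API versions.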
@@ -326,11 +334,6 @@ type MetricsServerConfig struct { TLS ServerTLS `json:"tls"` } -const ( - AlertNoFlows = "NetObservNoFlows" - AlertLokiError = "NetObservLokiError" -) - // Name of a processor alert. // Possible values are:
// - `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
@@ -338,6 +341,11 @@ const ( // +kubebuilder:validation:Enum:="NetObservNoFlows";"NetObservLokiError" type FLPAlert string +const ( + AlertNoFlows FLPAlert = "NetObservNoFlows" + AlertLokiError FLPAlert = "NetObservLokiError" +) + // `FLPMetrics` define the desired FLP configuration regarding metrics type FLPMetrics struct { // Metrics server endpoint configuration for Prometheus scraper @@ -362,11 +370,13 @@ type FLPMetrics struct { DisableAlerts []FLPAlert `json:"disableAlerts"` } +type FLPLogTypes string + const ( - LogTypeFlows = "Flows" - LogTypeConversations = "Conversations" - LogTypeEndedConversations = "EndedConversations" - LogTypeAll = "All" + LogTypeFlows FLPLogTypes = "Flows" + LogTypeConversations FLPLogTypes = "Conversations" + LogTypeEndedConversations FLPLogTypes = "EndedConversations" + LogTypeAll FLPLogTypes = "All" ) // `FlowCollectorFLP` defines the desired flowlogs-pipeline state of FlowCollector @@ -449,7 +459,7 @@ type FlowCollectorFLP struct { // +kubebuilder:validation:Optional // +kubebuilder:validation:Enum:="Flows";"Conversations";"EndedConversations";"All" // +kubebuilder:default:=Flows - LogTypes *string `json:"logTypes,omitempty"` + LogTypes *FLPLogTypes `json:"logTypes,omitempty"` //+kubebuilder:default:="30s" // +optional @@ -479,9 +489,11 @@ type FlowCollectorFLP struct { Debug DebugConfig `json:"debug,omitempty"` } +type HPAStatus string + const ( - HPAStatusDisabled = "Disabled" - HPAStatusEnabled = "Enabled" + HPAStatusDisabled HPAStatus = "Disabled" + HPAStatusEnabled HPAStatus = "Enabled" ) type FlowCollectorHPA struct { @@ -490,7 +502,7 @@ type FlowCollectorHPA struct { // `status` describes the desired status regarding deploying an horizontal pod autoscaler.
// - `Disabled` does not deploy an horizontal pod autoscaler.
// - `Enabled` deploys an horizontal pod autoscaler.
- Status string `json:"status,omitempty"` + Status HPAStatus `json:"status,omitempty"` // `minReplicas` is the lower limit for the number of replicas to which the autoscaler // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the @@ -510,10 +522,12 @@ type FlowCollectorHPA struct { Metrics []ascv2.MetricSpec `json:"metrics"` } +type LokiAuthToken string + const ( - LokiAuthDisabled = "Disabled" - LokiAuthUseHostToken = "Host" - LokiAuthForwardUserToken = "Forward" + LokiAuthDisabled LokiAuthToken = "Disabled" + LokiAuthUseHostToken LokiAuthToken = "Host" + LokiAuthForwardUserToken LokiAuthToken = "Forward" ) // `LokiManualParams` defines the full connection parameters to Loki. @@ -551,7 +565,7 @@ type LokiManualParams struct { // - `Forward` forwards the user token for authorization.
// - `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
// When using the Loki Operator, this must be set to `Forward`. - AuthToken string `json:"authToken,omitempty"` + AuthToken LokiAuthToken `json:"authToken,omitempty"` // TLS client configuration for Loki URL. // +optional @@ -896,19 +910,18 @@ type DebugConfig struct { Env map[string]string `json:"env,omitempty"` } -// Add more exporter types below type ExporterType string const ( KafkaExporter ExporterType = "Kafka" - IpfixExporter ExporterType = "Ipfix" + IpfixExporter ExporterType = "IPFIX" ) // `FlowCollectorExporter` defines an additional exporter to send enriched flows to. type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `Kafka` and `Ipfix`. + // `type` selects the type of exporters. The available options are `Kafka` and `IPFIX`. // +unionDiscriminator - // +kubebuilder:validation:Enum:="Kafka";"Ipfix" + // +kubebuilder:validation:Enum:="Kafka";"IPFIX" // +kubebuilder:validation:Required Type ExporterType `json:"type"` @@ -916,9 +929,9 @@ type FlowCollectorExporter struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. + // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. // +optional - IPFIX FlowCollectorIpfixReceiver `json:"ipfix,omitempty"` + IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` } // `FlowCollectorStatus` defines the observed state of FlowCollector @@ -936,7 +949,7 @@ type FlowCollectorStatus struct { // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster // +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type` -// +kubebuilder:printcolumn:name="Sampling (Ebpf)",type="string",JSONPath=`.spec.agent.ebpf.sampling` +// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason" // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index d1a48062b..8aeef027d 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -198,8 +198,8 @@ func (in *FlowCollector) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { *out = *in - out.Ipfix = in.Ipfix - in.Ebpf.DeepCopyInto(&out.Ebpf) + out.IPFIX = in.IPFIX + in.EBPF.DeepCopyInto(&out.EBPF) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. @@ -253,7 +253,7 @@ func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FlowCollectorEbpf) DeepCopyInto(out *FlowCollectorEbpf) { +func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { *out = *in in.Resources.DeepCopyInto(&out.Resources) if in.Sampling != nil { @@ -279,12 +279,12 @@ func (in *FlowCollectorEbpf) DeepCopyInto(out *FlowCollectorEbpf) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEbpf. -func (in *FlowCollectorEbpf) DeepCopy() *FlowCollectorEbpf { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. +func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { if in == nil { return nil } - out := new(FlowCollectorEbpf) + out := new(FlowCollectorEBPF) in.DeepCopyInto(out) return out } @@ -329,7 +329,7 @@ func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) { in.KafkaConsumerAutoscaler.DeepCopyInto(&out.KafkaConsumerAutoscaler) if in.LogTypes != nil { in, out := &in.LogTypes, &out.LogTypes - *out = new(string) + *out = new(FLPLogTypes) **out = **in } if in.ConversationHeartbeatInterval != nil { @@ -388,33 +388,33 @@ func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIpfix) DeepCopyInto(out *FlowCollectorIpfix) { +func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { *out = *in out.ClusterNetworkOperator = in.ClusterNetworkOperator out.OVNKubernetes = in.OVNKubernetes } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIpfix. -func (in *FlowCollectorIpfix) DeepCopy() *FlowCollectorIpfix { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. +func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { if in == nil { return nil } - out := new(FlowCollectorIpfix) + out := new(FlowCollectorIPFIX) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIpfixReceiver) DeepCopyInto(out *FlowCollectorIpfixReceiver) { +func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIpfixReceiver. -func (in *FlowCollectorIpfixReceiver) DeepCopy() *FlowCollectorIpfixReceiver { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. +func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { if in == nil { return nil } - out := new(FlowCollectorIpfixReceiver) + out := new(FlowCollectorIPFIXReceiver) in.DeepCopyInto(out) return out } diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index fd10ccadb..dc5271dc2 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -5108,7 +5108,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (Ebpf) + name: Sampling (EBPF) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -5148,7 +5148,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `Ebpf`.' 
+ flow reporter when `spec.agent.type` is set to `EBPF`.' properties: cacheActiveTimeout: default: 5s @@ -5339,8 +5339,8 @@ spec: type: object ipfix: description: '`ipfix` [deprecated (*)] - describes the settings - related to the Ipfix-based flow reporter when `spec.agent.type` - is set to `Ipfix`.' + related to the IPFIX-based flow reporter when `spec.agent.type` + is set to `IPFIX`.' properties: cacheActiveTimeout: default: 20s @@ -5369,8 +5369,8 @@ spec: forceSampleAll: default: false description: '`forceSampleAll` allows disabling sampling in - the Ipfix-based flow reporter. It is not recommended to - sample all the traffic with Ipfix, as it might generate + the IPFIX-based flow reporter. It is not recommended to + sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' @@ -5378,7 +5378,7 @@ spec: ovnKubernetes: description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is - used when using OVN''s Ipfix exports, without OpenShift. + used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: @@ -5404,23 +5404,23 @@ spec: it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you - can use the eBPF Agent instead of Ipfix.' + can use the eBPF Agent instead of IPFIX.' format: int32 minimum: 2 type: integer type: object type: - default: Ebpf + default: eBPF description: '`type` selects the flows tracing agent. Possible - values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
- - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
- `Ebpf` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `Ipfix` + values are:
- `eBPF` (default) to use NetObserv eBPF agent.
+ - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ `eBPF` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting Ipfix, but they would require manual configuration).' + support exporting IPFIX, but they would require manual configuration).' enum: - - Ebpf - - Ipfix + - eBPF + - IPFIX type: string type: object consolePlugin: @@ -6147,19 +6147,19 @@ spec: to send enriched flows to.' properties: ipfix: - description: Ipfix configuration, such as the IP address and - port to send enriched Ipfix flows to. + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. properties: targetHost: default: "" - description: Address of the Ipfix external receiver + description: Address of the IPFIX external receiver type: string targetPort: - description: Port for the Ipfix external receiver + description: Port for the IPFIX external receiver type: integer transport: description: Transport protocol (`TCP` or `UDP`) to be used - for the Ipfix connection, defaults to `TCP`. + for the IPFIX connection, defaults to `TCP`. enum: - TCP - UDP @@ -6346,10 +6346,10 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `Kafka` and `Ipfix`.' + options are `Kafka` and `IPFIX`.' enum: - Kafka - - Ipfix + - IPFIX type: string required: - type diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 241d6d082..e6bf379e8 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -378,7 +378,7 @@ metadata: }, "sampling": 50 }, - "type": "Ebpf" + "type": "eBPF" }, "consolePlugin": { "autoscaler": { diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 1b2fc4bb3..561564686 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -5094,7 +5094,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (Ebpf) + name: Sampling (EBPF) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -5134,7 +5134,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `Ebpf`.' + flow reporter when `spec.agent.type` is set to `EBPF`.' properties: cacheActiveTimeout: default: 5s @@ -5325,8 +5325,8 @@ spec: type: object ipfix: description: '`ipfix` [deprecated (*)] - describes the settings - related to the Ipfix-based flow reporter when `spec.agent.type` - is set to `Ipfix`.' + related to the IPFIX-based flow reporter when `spec.agent.type` + is set to `IPFIX`.' properties: cacheActiveTimeout: default: 20s @@ -5355,8 +5355,8 @@ spec: forceSampleAll: default: false description: '`forceSampleAll` allows disabling sampling in - the Ipfix-based flow reporter. It is not recommended to - sample all the traffic with Ipfix, as it might generate + the IPFIX-based flow reporter. It is not recommended to + sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' 
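The `enum` and `default` entries in the generated CRD above are not hand-maintained: controller-gen derives them from the kubebuilder markers on the corresponding Go field, and the bundle and config YAML files are regenerated afterwards (the types file itself says to run "make generate" after modifying it). A minimal sketch, with illustrative names rather than the operator's real package layout, of the markers that produce `default: eBPF` and the `eBPF`/`IPFIX` enum:

package v1example

// AgentType mirrors the typed-string pattern introduced in this patch.
type AgentType string

const (
	AgentEBPF  AgentType = "eBPF"
	AgentIPFIX AgentType = "IPFIX"
)

type AgentSpec struct {
	// controller-gen turns the two markers below into the `enum` and
	// `default` entries of the CRD's OpenAPI schema.
	// +kubebuilder:validation:Enum:="eBPF";"IPFIX"
	// +kubebuilder:default:=eBPF
	Type AgentType `json:"type,omitempty"`
}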
@@ -5364,7 +5364,7 @@ spec: ovnKubernetes: description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is - used when using OVN''s Ipfix exports, without OpenShift. + used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: @@ -5390,23 +5390,23 @@ spec: it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you - can use the eBPF Agent instead of Ipfix.' + can use the eBPF Agent instead of IPFIX.' format: int32 minimum: 2 type: integer type: object type: - default: Ebpf + default: eBPF description: '`type` selects the flows tracing agent. Possible - values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
- - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
- `Ebpf` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `Ipfix` + values are:
- `eBPF` (default) to use NetObserv eBPF agent.
+ - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ `eBPF` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting Ipfix, but they would require manual configuration).' + support exporting IPFIX, but they would require manual configuration).' enum: - - Ebpf - - Ipfix + - eBPF + - IPFIX type: string type: object consolePlugin: @@ -6133,19 +6133,19 @@ spec: to send enriched flows to.' properties: ipfix: - description: Ipfix configuration, such as the IP address and - port to send enriched Ipfix flows to. + description: IPFIX configuration, such as the IP address and + port to send enriched IPFIX flows to. properties: targetHost: default: "" - description: Address of the Ipfix external receiver + description: Address of the IPFIX external receiver type: string targetPort: - description: Port for the Ipfix external receiver + description: Port for the IPFIX external receiver type: integer transport: description: Transport protocol (`TCP` or `UDP`) to be used - for the Ipfix connection, defaults to `TCP`. + for the IPFIX connection, defaults to `TCP`. enum: - TCP - UDP @@ -6332,10 +6332,10 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `Kafka` and `Ipfix`.' + options are `Kafka` and `IPFIX`.' enum: - Kafka - - Ipfix + - IPFIX type: string required: - type diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml index 9dc280d35..ff3d2dfeb 100644 --- a/config/samples/flows_v1beta2_flowcollector.yaml +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -6,7 +6,7 @@ spec: namespace: netobserv deploymentModel: Direct agent: - type: Ebpf + type: eBPF ebpf: imagePullPolicy: IfNotPresent sampling: 50 @@ -137,7 +137,7 @@ spec: # address: "kafka-cluster-kafka-bootstrap.netobserv" # topic: netobserv-flows-export # or - # - type: Ipfix + # - type: IPFIX # ipfix: # targetHost: "ipfix-collector.ipfix.svc.cluster.local" # targetPort: 4739 diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index 8245c9724..a0e2543d1 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -344,16 +344,16 @@ func (b *builder) setLokiConfig(lconf *config.LokiConfig) { } func (b *builder) setFrontendConfig(fconf *config.FrontendConfig) { - if helper.UseEbpf(b.desired) { - if helper.IsPktDropEnabled(&b.desired.Agent.Ebpf) { + if helper.UseEBPF(b.desired) { + if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { fconf.Features = append(fconf.Features, "pktDrop") } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.Ebpf) { + if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { fconf.Features = append(fconf.Features, "dnsTracking") } - if helper.IsFlowRTTEnabled(&b.desired.Agent.Ebpf) { + if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { fconf.Features = append(fconf.Features, "flowRTT") } } diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 5fcb357e7..2fbb72e14 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -105,7 +105,7 @@ func (c *AgentController) Reconcile( if err != nil { return fmt.Errorf("fetching current EBPF Agent: %w", err) } - if !helper.UseEbpf(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { + if !helper.UseEBPF(&target.Spec) || 
c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { if current == nil { rlog.Info("nothing to do, as the requested agent is not eBPF", "currentAgent", target.Spec.Agent) @@ -125,7 +125,7 @@ func (c *AgentController) Reconcile( current = nil } - if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.Ebpf); err != nil { + if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.EBPF); err != nil { return fmt.Errorf("reconciling permissions: %w", err) } desired, err := c.desired(ctx, target, rlog) @@ -175,7 +175,7 @@ func newMountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropaga } func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCollector, rlog logr.Logger) (*v1.DaemonSet, error) { - if coll == nil || !helper.UseEbpf(&coll.Spec) { + if coll == nil || !helper.UseEBPF(&coll.Spec) { return nil, nil } version := helper.ExtractVersion(c.config.EBPFAgentImage) @@ -187,7 +187,7 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol volumeMounts := c.volumes.GetMounts() volumes := c.volumes.GetVolumes() - if helper.IsPrivileged(&coll.Spec.Agent.Ebpf) { + if helper.IsPrivileged(&coll.Spec.Agent.EBPF) { volume := corev1.Volume{ Name: bpfNetNSMountName, VolumeSource: corev1.VolumeSource{ @@ -206,8 +206,8 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol volumeMounts = append(volumeMounts, volumeMount) } - if helper.IsFeatureEnabled(&coll.Spec.Agent.Ebpf, flowslatest.PacketDrop) { - if !coll.Spec.Agent.Ebpf.Privileged { + if helper.IsFeatureEnabled(&coll.Spec.Agent.EBPF, flowslatest.PacketDrop) { + if !coll.Spec.Agent.EBPF.Privileged { rlog.Error(fmt.Errorf("invalid configuration"), "To use PacketsDrop feature privileged mode needs to be enabled") } else { volume := corev1.Volume{ @@ -257,8 +257,8 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol Containers: []corev1.Container{{ Name: constants.EBPFAgentName, Image: c.config.EBPFAgentImage, - ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.Ebpf.ImagePullPolicy), - Resources: coll.Spec.Agent.Ebpf.Resources, + ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.EBPF.ImagePullPolicy), + Resources: coll.Spec.Agent.EBPF.Resources, SecurityContext: c.securityContext(coll), Env: env, VolumeMounts: volumeMounts, @@ -277,9 +277,9 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC corev1.EnvVar{Name: envExport, Value: exportKafka}, corev1.EnvVar{Name: envKafkaBrokers, Value: coll.Spec.Kafka.Address}, corev1.EnvVar{Name: envKafkaTopic, Value: coll.Spec.Kafka.Topic}, - corev1.EnvVar{Name: envKafkaBatchSize, Value: strconv.Itoa(coll.Spec.Agent.Ebpf.KafkaBatchSize)}, + corev1.EnvVar{Name: envKafkaBatchSize, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize)}, // For easier user configuration, we can assume a constant message size per flow (~100B in protobuf) - corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.Ebpf.KafkaBatchSize / averageMessageSize)}, + corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize / averageMessageSize)}, ) if coll.Spec.Kafka.TLS.Enable { // Annotate pod with certificate reference so that it is reloaded if modified @@ -374,8 +374,8 @@ func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *core RunAsUser: ptr.To(int64(0)), } - if coll.Spec.Agent.Ebpf.Privileged { - sc.Privileged = &coll.Spec.Agent.Ebpf.Privileged + if coll.Spec.Agent.EBPF.Privileged { + 
sc.Privileged = &coll.Spec.Agent.EBPF.Privileged } else { sc.Capabilities = &corev1.Capabilities{Add: permissions.AllowedCapabilities} } @@ -386,42 +386,42 @@ func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *core func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1.EnvVar { var config []corev1.EnvVar - if coll.Spec.Agent.Ebpf.CacheActiveTimeout != "" { + if coll.Spec.Agent.EBPF.CacheActiveTimeout != "" { config = append(config, corev1.EnvVar{ Name: envCacheActiveTimeout, - Value: coll.Spec.Agent.Ebpf.CacheActiveTimeout, + Value: coll.Spec.Agent.EBPF.CacheActiveTimeout, }) } - if coll.Spec.Agent.Ebpf.CacheMaxFlows != 0 { + if coll.Spec.Agent.EBPF.CacheMaxFlows != 0 { config = append(config, corev1.EnvVar{ Name: envCacheMaxFlows, - Value: strconv.Itoa(int(coll.Spec.Agent.Ebpf.CacheMaxFlows)), + Value: strconv.Itoa(int(coll.Spec.Agent.EBPF.CacheMaxFlows)), }) } - if coll.Spec.Agent.Ebpf.LogLevel != "" { + if coll.Spec.Agent.EBPF.LogLevel != "" { config = append(config, corev1.EnvVar{ Name: envLogLevel, - Value: coll.Spec.Agent.Ebpf.LogLevel, + Value: coll.Spec.Agent.EBPF.LogLevel, }) } - if len(coll.Spec.Agent.Ebpf.Interfaces) > 0 { + if len(coll.Spec.Agent.EBPF.Interfaces) > 0 { config = append(config, corev1.EnvVar{ Name: envInterfaces, - Value: strings.Join(coll.Spec.Agent.Ebpf.Interfaces, envListSeparator), + Value: strings.Join(coll.Spec.Agent.EBPF.Interfaces, envListSeparator), }) } - if len(coll.Spec.Agent.Ebpf.ExcludeInterfaces) > 0 { + if len(coll.Spec.Agent.EBPF.ExcludeInterfaces) > 0 { config = append(config, corev1.EnvVar{ Name: envExcludeInterfaces, - Value: strings.Join(coll.Spec.Agent.Ebpf.ExcludeInterfaces, envListSeparator), + Value: strings.Join(coll.Spec.Agent.EBPF.ExcludeInterfaces, envListSeparator), }) } - sampling := coll.Spec.Agent.Ebpf.Sampling + sampling := coll.Spec.Agent.EBPF.Sampling if sampling != nil && *sampling > 1 { config = append(config, corev1.EnvVar{ Name: envSampling, @@ -429,7 +429,7 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 }) } - if helper.IsFlowRTTEnabled(&coll.Spec.Agent.Ebpf) { + if helper.IsFlowRTTEnabled(&coll.Spec.Agent.EBPF) { config = append(config, corev1.EnvVar{ Name: envEnableFlowRTT, Value: "true", @@ -438,8 +438,8 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 // set GOMEMLIMIT which allows specifying a soft memory cap to force GC when resource limit is reached // to prevent OOM - if coll.Spec.Agent.Ebpf.Resources.Limits.Memory() != nil { - if memLimit, ok := coll.Spec.Agent.Ebpf.Resources.Limits.Memory().AsInt64(); ok { + if coll.Spec.Agent.EBPF.Resources.Limits.Memory() != nil { + if memLimit, ok := coll.Spec.Agent.EBPF.Resources.Limits.Memory().AsInt64(); ok { // we will set the GOMEMLIMIT to current memlimit - 10% as a headroom to account for // memory sources the Go runtime is unaware of memLimit -= int64(float64(memLimit) * 0.1) @@ -447,14 +447,14 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 } } - if helper.IsPktDropEnabled(&coll.Spec.Agent.Ebpf) { + if helper.IsPktDropEnabled(&coll.Spec.Agent.EBPF) { config = append(config, corev1.EnvVar{ Name: envEnablePktDrop, Value: "true", }) } - if helper.IsDNSTrackingEnabled(&coll.Spec.Agent.Ebpf) { + if helper.IsDNSTrackingEnabled(&coll.Spec.Agent.EBPF) { config = append(config, corev1.EnvVar{ Name: envEnableDNSTracking, Value: "true", @@ -465,7 +465,7 @@ func (c *AgentController) setEnvConfig(coll 
*flowslatest.FlowCollector) []corev1 dedupJustMark := dedupeJustMarkDefault // we need to sort env map to keep idempotency, // as equal maps could be iterated in different order - for _, pair := range helper.KeySorted(coll.Spec.Agent.Ebpf.Debug.Env) { + for _, pair := range helper.KeySorted(coll.Spec.Agent.EBPF.Debug.Env) { k, v := pair[0], pair[1] if k == envDedupe { dedup = v diff --git a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index 76073be6a..4b41a8c31 100644 --- a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -33,7 +33,7 @@ func NewReconciler(cmn *reconcilers.Common) Reconciler { return Reconciler{Common: *cmn} } -func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEbpf) error { +func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEBPF) error { log.IntoContext(ctx, log.FromContext(ctx).WithName("permissions")) if err := c.reconcileNamespace(ctx); err != nil { @@ -121,7 +121,7 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { } func (c *Reconciler) reconcileVendorPermissions( - ctx context.Context, desired *flowslatest.FlowCollectorEbpf, + ctx context.Context, desired *flowslatest.FlowCollectorEBPF, ) error { if c.UseOpenShiftSCC { return c.reconcileOpenshiftPermissions(ctx, desired) @@ -130,7 +130,7 @@ func (c *Reconciler) reconcileVendorPermissions( } func (c *Reconciler) reconcileOpenshiftPermissions( - ctx context.Context, desired *flowslatest.FlowCollectorEbpf, + ctx context.Context, desired *flowslatest.FlowCollectorEBPF, ) error { rlog := log.FromContext(ctx, "securityContextConstraints", constants.EBPFSecurityContext) diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index cac23d223..1239068b3 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -148,12 +148,12 @@ func (r *FlowCollectorReconciler) Reconcile(ctx context.Context, _ ctrl.Request) // OVS config map for CNO if r.availableAPIs.HasCNO() { - ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.Ipfix.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) + ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.IPFIX.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileCNOFailed(err), desired) } } else { - ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.Ipfix.OVNKubernetes) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.IPFIX.OVNKubernetes) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileOVNKFailed(err), desired) } @@ -380,7 +380,7 @@ func (r *FlowCollectorReconciler) finalize(ctx context.Context, desired *flowsla if !r.availableAPIs.HasCNO() { ns := getNamespaceName(desired) info := r.newCommonInfo(ctx, desired, ns, ns, nil, func(b bool) {}, func(b bool) {}) - ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.Ipfix.OVNKubernetes) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.IPFIX.OVNKubernetes) if err := ovsConfigController.Finalize(ctx, desired); err != nil { 
return fmt.Errorf("failed to finalize ovn-kubernetes reconciler: %w", err) } diff --git a/controllers/flowcollector_controller_certificates_test.go b/controllers/flowcollector_controller_certificates_test.go index 907b76ba6..b8ed76ee8 100644 --- a/controllers/flowcollector_controller_certificates_test.go +++ b/controllers/flowcollector_controller_certificates_test.go @@ -141,7 +141,7 @@ func flowCollectorCertificatesSpecs() { Namespace: operatorNamespace, DeploymentModel: flowslatest.DeploymentModelKafka, Agent: flowslatest.FlowCollectorAgent{ - Type: "EBPF", + Type: "eBPF", }, Loki: flowslatest.FlowCollectorLoki{ Enable: ptr.To(true), @@ -182,7 +182,7 @@ func flowCollectorCertificatesSpecs() { }, }, SASL: flowslatest.SASLConfig{ - Type: "PLAIN", + Type: "Plain", ClientIDReference: flowslatest.FileReference{ Type: flowslatest.RefTypeSecret, Name: kafka2Sasl.Name, diff --git a/controllers/flowcollector_controller_ebpf_test.go b/controllers/flowcollector_controller_ebpf_test.go index fe9a2d32a..ff948686d 100644 --- a/controllers/flowcollector_controller_ebpf_test.go +++ b/controllers/flowcollector_controller_ebpf_test.go @@ -61,8 +61,8 @@ func flowCollectorEBPFSpecs() { LogLevel: "error", }, Agent: flowslatest.FlowCollectorAgent{ - Type: "EBPF", - Ebpf: flowslatest.FlowCollectorEbpf{ + Type: "eBPF", + EBPF: flowslatest.FlowCollectorEBPF{ Sampling: ptr.To(int32(123)), CacheActiveTimeout: "15s", CacheMaxFlows: 100, @@ -148,9 +148,9 @@ func flowCollectorEBPFSpecs() { It("Should update fields that have changed", func() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { - Expect(*fc.Spec.Agent.Ebpf.Sampling).To(Equal(int32(123))) - *fc.Spec.Agent.Ebpf.Sampling = 4 - fc.Spec.Agent.Ebpf.Privileged = true + Expect(*fc.Spec.Agent.EBPF.Sampling).To(Equal(int32(123))) + *fc.Spec.Agent.EBPF.Sampling = 4 + fc.Spec.Agent.EBPF.Privileged = true }) ds := appsv1.DaemonSet{} @@ -281,7 +281,7 @@ func flowCollectorEBPFKafkaSpecs() { ObjectMeta: metav1.ObjectMeta{Name: crKey.Name}, Spec: flowslatest.FlowCollectorSpec{ Namespace: operatorNamespace, - Agent: flowslatest.FlowCollectorAgent{Type: "Ebpf"}, + Agent: flowslatest.FlowCollectorAgent{Type: "eBPF"}, DeploymentModel: flowslatest.DeploymentModelKafka, Kafka: flowslatest.FlowCollectorKafka{ Address: "kafka-cluster-kafka-bootstrap", diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index 1760063d9..f1bb35053 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -87,8 +87,8 @@ func flowCollectorIsoSpecs() { DropUnusedFields: ptr.To(false), }, Agent: flowslatest.FlowCollectorAgent{ - Type: "Ebpf", - Ipfix: flowslatest.FlowCollectorIpfix{ + Type: "eBPF", + IPFIX: flowslatest.FlowCollectorIPFIX{ Sampling: 2, // 0 is forbidden here CacheActiveTimeout: "5s", CacheMaxFlows: 100, @@ -102,7 +102,7 @@ func flowCollectorIsoSpecs() { ContainerName: "test", }, }, - Ebpf: flowslatest.FlowCollectorEbpf{ + EBPF: flowslatest.FlowCollectorEBPF{ Sampling: &zero, CacheActiveTimeout: "5s", CacheMaxFlows: 100, diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 028fa83bd..6a10a58ee 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -122,8 +122,8 @@ func flowCollectorControllerSpecs() { }, }, Agent: flowslatest.FlowCollectorAgent{ - Type: "Ipfix", - Ipfix: flowslatest.FlowCollectorIpfix{ + Type: "IPFIX", + IPFIX: 
flowslatest.FlowCollectorIPFIX{ Sampling: 200, }, }, @@ -283,7 +283,7 @@ func flowCollectorControllerSpecs() { }, } fc.Spec.Loki = flowslatest.FlowCollectorLoki{} - fc.Spec.Agent.Ipfix = flowslatest.FlowCollectorIpfix{ + fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ Sampling: 400, CacheActiveTimeout: "30s", CacheMaxFlows: 1000, @@ -399,7 +399,7 @@ func flowCollectorControllerSpecs() { if err := k8sClient.Get(ctx, crKey, &fc); err != nil { return err } - fc.Spec.Agent.Ipfix.Sampling = 1 + fc.Spec.Agent.IPFIX.Sampling = 1 return k8sClient.Update(ctx, &fc) }).Should(Satisfy(func(err error) bool { return err != nil && strings.Contains(err.Error(), "spec.agent.ipfix.sampling: Invalid value: 1") @@ -410,8 +410,8 @@ func flowCollectorControllerSpecs() { if err := k8sClient.Get(ctx, crKey, &fc); err != nil { return err } - fc.Spec.Agent.Ipfix.Sampling = 10 - fc.Spec.Agent.Ipfix.ForceSampleAll = true + fc.Spec.Agent.IPFIX.Sampling = 10 + fc.Spec.Agent.IPFIX.ForceSampleAll = true return k8sClient.Update(ctx, &fc) }).Should(Succeed()) @@ -888,7 +888,7 @@ func flowCollectorControllerSpecs() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { fc.Spec.Processor.Port = 9999 fc.Spec.Namespace = otherNamespace - fc.Spec.Agent.Ipfix = flowslatest.FlowCollectorIpfix{ + fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ Sampling: 200, } }) diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index a10f47e55..dace20543 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ b/controllers/flowlogspipeline/flp_common_objects.go @@ -74,6 +74,8 @@ func newBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCollectorSp name := name(ck) var promTLS *flowslatest.CertificateReference switch desired.Processor.Metrics.Server.TLS.Type { + case flowslatest.ServerTLSDisabled: + // nothing to do here case flowslatest.ServerTLSProvided: promTLS = desired.Processor.Metrics.Server.TLS.Provided if promTLS == nil { @@ -117,7 +119,7 @@ func (b *builder) serviceMonitorName() string { return serviceMonitorName(b.conf func (b *builder) prometheusRuleName() string { return prometheusRuleName(b.confKind) } func (b *builder) portProtocol() corev1.Protocol { - if helper.UseEbpf(b.desired) { + if helper.UseEBPF(b.desired) { return corev1.ProtocolTCP } return corev1.ProtocolUDP @@ -393,7 +395,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P }, } - if helper.IsPktDropEnabled(&b.desired.Agent.Ebpf) { + if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { outputPktDropFields := []api.OutputField{ { Name: "PktDropBytes", @@ -425,7 +427,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P outputFields = append(outputFields, outputPktDropFields...) } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.Ebpf) { + if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { outDNSTrackingFields := []api.OutputField{ { Name: "DnsFlagsResponseCode", @@ -439,7 +441,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P outputFields = append(outputFields, outDNSTrackingFields...) } - if helper.IsFlowRTTEnabled(&b.desired.Agent.Ebpf) { + if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { outputFields = append(outputFields, api.OutputField{ Name: "MaxTimeFlowRttNs", Operation: "max", @@ -524,7 +526,7 @@ func (b *builder) addTransformFilter(lastStage config.PipelineBuilderStage) conf // Filter-out unused fields? 
if helper.PtrBool(b.desired.Processor.DropUnusedFields) { - if helper.UseIpfix(b.desired) { + if helper.UseIPFIX(b.desired) { rules := filters.GetOVSGoflowUnusedRules() transformFilterRules = append(transformFilterRules, rules...) } @@ -544,7 +546,7 @@ func (b *builder) addCustomExportStages(enrichedStage *config.PipelineBuilderSta b.createKafkaWriteStage(fmt.Sprintf("kafka-export-%d", i), &exporter.Kafka, enrichedStage) } if exporter.Type == flowslatest.IpfixExporter { - createIpfixWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) + createIPFIXWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) } } } @@ -558,11 +560,11 @@ func (b *builder) createKafkaWriteStage(name string, spec *flowslatest.FlowColle }) } -func createIpfixWriteStage(name string, spec *flowslatest.FlowCollectorIpfixReceiver, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { +func createIPFIXWriteStage(name string, spec *flowslatest.FlowCollectorIPFIXReceiver, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { return fromStage.WriteIpfix(name, api.WriteIpfix{ TargetHost: spec.TargetHost, TargetPort: spec.TargetPort, - Transport: getIpfixTransport(spec.Transport), + Transport: getIPFIXTransport(spec.Transport), EnterpriseID: 2, }) } @@ -597,7 +599,7 @@ func (b *builder) getKafkaSASL(sasl *flowslatest.SASLConfig, volumePrefix string } } -func getIpfixTransport(transport string) string { +func getIPFIXTransport(transport string) string { switch transport { case "UDP": return "udp" @@ -797,7 +799,7 @@ func (b *builder) prometheusRule() *monitoringv1.PrometheusRule { // Not receiving flows if shouldAddAlert(flowslatest.AlertNoFlows, b.desired.Processor.Metrics.DisableAlerts) { rules = append(rules, monitoringv1.Rule{ - Alert: flowslatest.AlertNoFlows, + Alert: string(flowslatest.AlertNoFlows), Annotations: map[string]string{ "description": "NetObserv flowlogs-pipeline is not receiving any flow, this is either a connection issue with the agent, or an agent issue", "summary": "NetObserv flowlogs-pipeline is not receiving any flow", @@ -814,7 +816,7 @@ func (b *builder) prometheusRule() *monitoringv1.PrometheusRule { // Flows getting dropped by loki library if shouldAddAlert(flowslatest.AlertLokiError, b.desired.Processor.Metrics.DisableAlerts) { rules = append(rules, monitoringv1.Rule{ - Alert: flowslatest.AlertLokiError, + Alert: string(flowslatest.AlertLokiError), Annotations: map[string]string{ "description": "NetObserv flowlogs-pipeline is dropping flows because of loki errors, loki may be down or having issues ingesting every flows. 
Please check loki and flowlogs-pipeline logs.", "summary": "NetObserv flowlogs-pipeline is dropping flows because of loki errors", diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go index dc516caa8..a8c3cec8f 100644 --- a/controllers/flowlogspipeline/flp_ingest_objects.go +++ b/controllers/flowlogspipeline/flp_ingest_objects.go @@ -51,7 +51,7 @@ func (b *ingestBuilder) configMap() (*corev1.ConfigMap, string, error) { func (b *ingestBuilder) buildPipelineConfig() ([]config.Stage, []config.StageParam, error) { var pipeline config.PipelineBuilderStage - if helper.UseIpfix(b.generic.desired) { + if helper.UseIPFIX(b.generic.desired) { // IPFIX collector pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{ Port: int(b.generic.desired.Processor.Port), diff --git a/controllers/flowlogspipeline/flp_ingest_reconciler.go b/controllers/flowlogspipeline/flp_ingest_reconciler.go index f451b2ffd..195dd465b 100644 --- a/controllers/flowlogspipeline/flp_ingest_reconciler.go +++ b/controllers/flowlogspipeline/flp_ingest_reconciler.go @@ -79,7 +79,7 @@ func (r *flpIngesterReconciler) reconcile(ctx context.Context, desired *flowslat } // Ingester only used with Kafka and without eBPF - if !helper.UseKafka(&desired.Spec) || helper.UseEbpf(&desired.Spec) { + if !helper.UseKafka(&desired.Spec) || helper.UseEBPF(&desired.Spec) { r.Managed.TryDeleteAll(ctx) return nil } diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go index c037bfc4d..e846d1e07 100644 --- a/controllers/flowlogspipeline/flp_monolith_objects.go +++ b/controllers/flowlogspipeline/flp_monolith_objects.go @@ -52,7 +52,7 @@ func (b *monolithBuilder) configMap() (*corev1.ConfigMap, string, error) { func (b *monolithBuilder) buildPipelineConfig() ([]config.Stage, []config.StageParam, error) { var pipeline config.PipelineBuilderStage - if helper.UseIpfix(b.generic.desired) { + if helper.UseIPFIX(b.generic.desired) { // IPFIX collector pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{ Port: int(b.generic.desired.Processor.Port), diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 4f9944058..04981295e 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -59,7 +59,7 @@ func getConfig(lokiMode ...string) flowslatest.FlowCollectorSpec { return flowslatest.FlowCollectorSpec{ DeploymentModel: flowslatest.DeploymentModelDirect, - Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIpfix}, + Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIPFIX}, Processor: flowslatest.FlowCollectorFLP{ Port: 2055, ImagePullPolicy: string(pullPolicy), @@ -967,7 +967,7 @@ func TestPipelineWithExporter(t *testing.T) { cfg.Exporters = append(cfg.Exporters, &flowslatest.FlowCollectorExporter{ Type: flowslatest.IpfixExporter, - IPFIX: flowslatest.FlowCollectorIpfixReceiver{ + IPFIX: flowslatest.FlowCollectorIPFIXReceiver{ TargetHost: "ipfix-receiver-test", TargetPort: 9999, Transport: "TCP", diff --git a/controllers/flowlogspipeline/flp_transfo_objects.go b/controllers/flowlogspipeline/flp_transfo_objects.go index 603f052aa..7cab884c6 100644 --- a/controllers/flowlogspipeline/flp_transfo_objects.go +++ b/controllers/flowlogspipeline/flp_transfo_objects.go @@ -57,7 +57,7 @@ func (b *transfoBuilder) buildPipelineConfig() ([]config.Stage, []config.StagePa // For now, we leave this communication 
via JSON and just setup protobuf ingestion when // the transformer is communicating directly via eBPF agent decoder := api.Decoder{Type: "protobuf"} - if helper.UseIpfix(b.generic.desired) { + if helper.UseIPFIX(b.generic.desired) { decoder = api.Decoder{Type: "json"} } pipeline := config.NewKafkaPipeline("kafka-read", api.IngestKafka{ diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go index c8843dd49..1916372d9 100644 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ b/controllers/ovs/flowsconfig_cno_reconciler.go @@ -38,7 +38,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl if err != nil { return err } - if !helper.UseIpfix(&target.Spec) { + if !helper.UseIPFIX(&target.Spec) { if current == nil { return nil } @@ -58,7 +58,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl // compare current and desired if current == nil { - rlog.Info("Provided Ipfix configuration. Creating " + c.ovsConfigMapName + " ConfigMap") + rlog.Info("Provided IPFIX configuration. Creating " + c.ovsConfigMapName + " ConfigMap") cm, err := c.flowsConfigMap(desired) if err != nil { return err @@ -67,7 +67,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl } if desired != nil && *desired != *current { - rlog.Info("Provided Ipfix configuration differs current configuration. Updating") + rlog.Info("Provided IPFIX configuration differs current configuration. Updating") cm, err := c.flowsConfigMap(desired) if err != nil { return err @@ -101,11 +101,11 @@ func (c *FlowsConfigCNOController) current(ctx context.Context) (*flowsConfig, e func (c *FlowsConfigCNOController) desired( ctx context.Context, coll *flowslatest.FlowCollector) *flowsConfig { - corrected := coll.Spec.Agent.Ipfix.DeepCopy() + corrected := coll.Spec.Agent.IPFIX.DeepCopy() corrected.Sampling = getSampling(ctx, corrected) return &flowsConfig{ - FlowCollectorIpfix: *corrected, + FlowCollectorIPFIX: *corrected, NodePort: coll.Spec.Processor.Port, } } diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go index ed4ffb42d..926aed71c 100644 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go @@ -51,15 +51,15 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows Name: c.config.DaemonSetName, Namespace: c.config.Namespace, }, ds); err != nil { - if kerr.IsNotFound(err) && !helper.UseIpfix(&target.Spec) { - // If we don't want Ipfix and ovn-k daemonset is not found, assume there no ovn-k, just succeed + if kerr.IsNotFound(err) && !helper.UseIPFIX(&target.Spec) { + // If we don't want IPFIX and ovn-k daemonset is not found, assume there no ovn-k, just succeed rlog.Info("Skip reconciling OVN: OVN DaemonSet not found") return nil } return fmt.Errorf("retrieving %s/%s daemonset: %w", c.config.Namespace, c.config.DaemonSetName, err) } - ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.Ipfix.OVNKubernetes.ContainerName) + ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.IPFIX.OVNKubernetes.ContainerName) if ovnkubeNode == nil { return errors.New("could not find container ovnkube-node") } @@ -71,7 +71,7 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } } if anyUpdate { - rlog.Info("Provided Ipfix configuration differs current configuration. 
Updating") + rlog.Info("Provided IPFIX configuration differs current configuration. Updating") return c.Update(ctx, ds) } @@ -80,21 +80,21 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } func (c *FlowsConfigOVNKController) desiredEnv(ctx context.Context, coll *flowslatest.FlowCollector) (map[string]string, error) { - cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.Ipfix.CacheActiveTimeout) + cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.IPFIX.CacheActiveTimeout) if err != nil { return nil, err } - sampling := getSampling(ctx, &coll.Spec.Agent.Ipfix) + sampling := getSampling(ctx, &coll.Spec.Agent.IPFIX) envs := map[string]string{ "OVN_IPFIX_TARGETS": "", "OVN_IPFIX_CACHE_ACTIVE_TIMEOUT": strconv.Itoa(int(cacheTimeout.Seconds())), - "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.Ipfix.CacheMaxFlows)), + "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.IPFIX.CacheMaxFlows)), "OVN_IPFIX_SAMPLING": strconv.Itoa(int(sampling)), } - if !helper.UseIpfix(&coll.Spec) { - // No Ipfix => leave target empty and return + if !helper.UseIPFIX(&coll.Spec) { + // No IPFIX => leave target empty and return return envs, nil } diff --git a/controllers/ovs/flowsconfig_types.go b/controllers/ovs/flowsconfig_types.go index 846cbc5c6..b84a20957 100644 --- a/controllers/ovs/flowsconfig_types.go +++ b/controllers/ovs/flowsconfig_types.go @@ -12,7 +12,7 @@ import ( ) type flowsConfig struct { - flowslatest.FlowCollectorIpfix `json:",inline" mapstructure:",squash"` + flowslatest.FlowCollectorIPFIX `json:",inline" mapstructure:",squash"` SharedTarget string `json:"sharedTarget,omitempty" mapstructure:"sharedTarget,omitempty"` NodePort int32 `json:"nodePort,omitempty" mapstructure:"nodePort,omitempty"` } @@ -41,7 +41,7 @@ func (fc *flowsConfig) asStringMap() (map[string]string, error) { // getSampling returns the configured sampling, or 1 if ipfix.forceSampleAll is true // Note that configured sampling has a minimum value of 2. // See also https://bugzilla.redhat.com/show_bug.cgi?id=2103136 , https://bugzilla.redhat.com/show_bug.cgi?id=2104943 -func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIpfix) int32 { +func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIPFIX) int32 { rlog := log.FromContext(ctx) if cfg.ForceSampleAll { rlog.Info("Warning, sampling is set to 1. This may put cluster stability at risk.") diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 29d061a96..081a87a6c 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -9111,24 +9111,24 @@ Agent configuration for flows extraction.
@@ -9140,7 +9140,7 @@ Agent configuration for flows extraction. -`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`. +`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
ebpf object - `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.
+ `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
false
ipfix object - `ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.
+ `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
false
type enum - `type` selects the flows tracing agent. Possible values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
- `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
`Ebpf` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, but they would require manual configuration).
+ `type` selects the flows tracing agent. Possible values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`eBPF` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).

- Enum: Ebpf, Ipfix
- Default: Ebpf
+ Enum: eBPF, IPFIX
+ Default: eBPF
false
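For illustration, a minimal `FlowCollector` excerpt using the renamed enum values might look like the following sketch (the metadata name and field layout are assumed from the CRD samples, not taken verbatim from this patch):

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster  # assumed sample name
spec:
  agent:
    type: eBPF   # formerly "Ebpf"; the deprecated alternative is now "IPFIX" (formerly "Ipfix")
```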
@@ -9363,7 +9363,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -`ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`. +`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
@@ -9405,7 +9405,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -9414,14 +9414,14 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
forceSampleAll boolean - `forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.
+ `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.

Default: false
ovnKubernetes object - `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+ `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
false
sampling integer - `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix.
+ `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.

Format: int32
Default: 400
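A sketch of how these IPFIX sampling fields fit together, with illustrative values (field paths assumed from the CRD above):

```yaml
spec:
  agent:
    type: IPFIX             # deprecated; the eBPF agent is recommended instead
    ipfix:
      sampling: 400         # one flow out of 400 is sent; values below 2 are rejected
      forceSampleAll: false # true disables sampling and ignores `sampling`; use at your own risk
```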
@@ -9466,7 +9466,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. +`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. @@ -10710,16 +10710,16 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -10738,7 +10738,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. +IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
type enum - `type` selects the type of exporters. The available options are `Kafka` and `Ipfix`.
+ `type` selects the type of exporters. The available options are `Kafka` and `IPFIX`.

- Enum: Kafka, Ipfix
+ Enum: Kafka, IPFIX
true
ipfix object - Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to.
+ IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
false
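A hedged sketch of an `exporters` entry using the renamed `IPFIX` type; the target values mirror those used in `flp_test.go` above and are illustrative only:

```yaml
spec:
  exporters:
    - type: IPFIX                       # formerly "Ipfix"
      ipfix:
        targetHost: ipfix-receiver-test # address of the IPFIX external receiver
        targetPort: 9999
        transport: TCP                  # `TCP` or `UDP`, defaults to `TCP`
```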
@@ -10753,7 +10753,7 @@ Ipfix configuration, such as the IP address and port to send enriched Ipfix flow @@ -10762,14 +10762,14 @@ Ipfix configuration, such as the IP address and port to send enriched Ipfix flow diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index aedf7b049..96e91fbd0 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -3535,7 +3535,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (Ebpf) + name: Sampling (EBPF) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -3563,7 +3563,7 @@ spec: description: Agent configuration for flows extraction. properties: ebpf: - description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.' + description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.' properties: cacheActiveTimeout: default: 5s @@ -3685,7 +3685,7 @@ spec: type: integer type: object ipfix: - description: '`ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.' + description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' properties: cacheActiveTimeout: default: 20s @@ -3708,10 +3708,10 @@ spec: type: object forceSampleAll: default: false - description: '`forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' + description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' type: boolean ovnKubernetes: - description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' + description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: containerName: default: ovnkube-node @@ -3728,17 +3728,17 @@ spec: type: object sampling: default: 400 - description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix.' + description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. 
Alternatively, you can use the eBPF Agent instead of IPFIX.' format: int32 minimum: 2 type: integer type: object type: - default: Ebpf - description: '`type` selects the flows tracing agent. Possible values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
- `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
`Ebpf` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, but they would require manual configuration).' + default: eBPF + description: '`type` selects the flows tracing agent. Possible values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).' enum: - - Ebpf - - Ipfix + - eBPF + - IPFIX type: string type: object consolePlugin: @@ -4242,17 +4242,17 @@ spec: description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' properties: ipfix: - description: Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. + description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. properties: targetHost: default: "" - description: Address of the Ipfix external receiver + description: Address of the IPFIX external receiver type: string targetPort: - description: Port for the Ipfix external receiver + description: Port for the IPFIX external receiver type: integer transport: - description: Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`. + description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. enum: - TCP - UDP @@ -4387,10 +4387,10 @@ spec: - topic type: object type: - description: '`type` selects the type of exporters. The available options are `Kafka` and `Ipfix`.' + description: '`type` selects the type of exporters. The available options are `Kafka` and `IPFIX`.' enum: - Kafka - - Ipfix + - IPFIX type: string required: - type diff --git a/pkg/conversion/conversion.go b/pkg/conversion/conversion.go index a9c8b3b17..fd1f91c1e 100644 --- a/pkg/conversion/conversion.go +++ b/pkg/conversion/conversion.go @@ -2,12 +2,24 @@ package conversion import ( + "regexp" + "strings" + "unicode" + "github.com/netobserv/network-observability-operator/controllers/constants" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" ) +// Following K8S convention, mixed capitalization should be preserved +// see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#constants +var upperPascalExceptions = map[string]string{ + "IPFIX": "IPFIX", + "EBPF": "eBPF", + "SCRAM-SHA512": "ScramSHA512", +} + // MarshalData stores the source object as json data in the destination object annotations map. // It ignores the metadata of the source object. 
func MarshalData(src metav1.Object, dst metav1.Object) error { @@ -44,3 +56,48 @@ func UnmarshalData(from metav1.Object, to interface{}) (bool, error) { from.SetAnnotations(annotations) return true, nil } + +func UpperToPascal(str string) string { + if len(str) == 0 { + return str + } + + // check for any exception in map + if exception, found := upperPascalExceptions[str]; found { + return exception + } + + // Split on '-' or '_' rune, capitalize first letter of each part and join them + var sb strings.Builder + array := regexp.MustCompile(`[\-\_]+`).Split(strings.ToLower(str), -1) + for _, s := range array { + runes := []rune(s) + runes[0] = unicode.ToUpper(runes[0]) + sb.WriteString(string(runes)) + } + return sb.String() +} + +func PascalToUpper(str string, splitter rune) string { + if len(str) == 0 { + return str + } + + // check for any exception in map + for k, v := range upperPascalExceptions { + if v == str { + return k + } + } + + // Split on capital letters, upper each part and join with splitter + var sb strings.Builder + runes := []rune(str) + for i, r := range runes { + if i > 0 && unicode.IsUpper(r) { + sb.WriteRune(splitter) + } + sb.WriteRune(unicode.ToUpper(r)) + } + return sb.String() +} diff --git a/pkg/conversion/conversion_test.go b/pkg/conversion/conversion_test.go new file mode 100644 index 000000000..2f3ad02f6 --- /dev/null +++ b/pkg/conversion/conversion_test.go @@ -0,0 +1,39 @@ +package conversion + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUpperToPascal(t *testing.T) { + assert := assert.New(t) + + v := UpperToPascal("") + assert.Equal("", v) + + v = UpperToPascal("EBPF") + assert.Equal("eBPF", v) + + v = UpperToPascal("ENDED_CONVERSATIONS") + assert.Equal("EndedConversations", v) + + v = UpperToPascal("SCRAM-SHA512") + assert.Equal("ScramSHA512", v) +} + +func TestPascalToUpper(t *testing.T) { + assert := assert.New(t) + + v := PascalToUpper("", ' ') + assert.Equal("", v) + + v = PascalToUpper("eBPF", ' ') + assert.Equal("EBPF", v) + + v = PascalToUpper("EndedConversations", '_') + assert.Equal("ENDED_CONVERSATIONS", v) + + v = PascalToUpper("ScramSHA512", '-') + assert.Equal("SCRAM-SHA512", v) +} diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index 2e736675d..0ddf85561 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -10,18 +10,18 @@ import ( ) func GetSampling(spec *flowslatest.FlowCollectorSpec) int { - if UseEbpf(spec) { - return int(*spec.Agent.Ebpf.Sampling) + if UseEBPF(spec) { + return int(*spec.Agent.EBPF.Sampling) } - return int(spec.Agent.Ipfix.Sampling) + return int(spec.Agent.IPFIX.Sampling) } -func UseEbpf(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentEbpf +func UseEBPF(spec *flowslatest.FlowCollectorSpec) bool { + return spec.Agent.Type == flowslatest.AgentEBPF } -func UseIpfix(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentIpfix +func UseIPFIX(spec *flowslatest.FlowCollectorSpec) bool { + return spec.Agent.Type == flowslatest.AgentIPFIX } func UseKafka(spec *flowslatest.FlowCollectorSpec) bool { @@ -90,7 +90,7 @@ func UseConsolePlugin(spec *flowslatest.FlowCollectorSpec) bool { (spec.ConsolePlugin.Enable == nil || *spec.ConsolePlugin.Enable) } -func IsFeatureEnabled(spec *flowslatest.FlowCollectorEbpf, feature flowslatest.AgentFeature) bool { +func IsFeatureEnabled(spec *flowslatest.FlowCollectorEBPF, feature flowslatest.AgentFeature) bool { for _, f := range spec.Features { 
if f == feature { return true @@ -99,22 +99,22 @@ func IsFeatureEnabled(spec *flowslatest.FlowCollectorEbpf, feature flowslatest.A return false } -func IsPrivileged(spec *flowslatest.FlowCollectorEbpf) bool { +func IsPrivileged(spec *flowslatest.FlowCollectorEBPF) bool { return spec.Privileged } -func IsPktDropEnabled(spec *flowslatest.FlowCollectorEbpf) bool { +func IsPktDropEnabled(spec *flowslatest.FlowCollectorEBPF) bool { if IsPrivileged(spec) && IsFeatureEnabled(spec, flowslatest.PacketDrop) { return true } return false } -func IsDNSTrackingEnabled(spec *flowslatest.FlowCollectorEbpf) bool { +func IsDNSTrackingEnabled(spec *flowslatest.FlowCollectorEBPF) bool { return IsFeatureEnabled(spec, flowslatest.DNSTracking) } -func IsFlowRTTEnabled(spec *flowslatest.FlowCollectorEbpf) bool { +func IsFlowRTTEnabled(spec *flowslatest.FlowCollectorEBPF) bool { return IsFeatureEnabled(spec, flowslatest.FlowRTT) } From 0d73c3e0d4f215f403eb4b7d474a74afd8bc7073 Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Mon, 6 Nov 2023 10:14:03 -0500 Subject: [PATCH 05/16] Update eBPF enum --- api/v1beta2/flowcollector_types.go | 6 +++--- config/crd/bases/flows.netobserv.io_flowcollectors.yaml | 6 +++--- docs/FlowCollector.md | 6 +++--- hack/cloned.flows.netobserv.io_flowcollectors.yaml | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 766279456..4537b1b79 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -110,7 +110,7 @@ type FlowCollectorAgent struct { IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` // `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` - // is set to `EBPF`. + // is set to `eBPF`. // +optional EBPF FlowCollectorEBPF `json:"ebpf,omitempty"` } @@ -241,10 +241,10 @@ type FlowCollectorEBPF struct { // List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
// - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting // the kernel debug filesystem, so the eBPF pod has to run as privileged. - // If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ // If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
// - `DNSTracking`: enable the DNS tracking feature. This feature requires mounting // the kernel debug filesystem hence the eBPF pod has to run as privileged. - // If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ // If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
// - `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
// +optional Features []AgentFeature `json:"features,omitempty"` diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 561564686..6b46d274e 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -5134,7 +5134,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `EBPF`.' + flow reporter when `spec.agent.type` is set to `eBPF`.' properties: cacheActiveTimeout: default: 5s @@ -5191,11 +5191,11 @@ spec: might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, - so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run - as privileged. If the `spec.agent.eBPF.privileged` parameter + as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 081a87a6c..bbe570141 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -9111,7 +9111,7 @@ Agent configuration for flows extraction.
@@ -9140,7 +9140,7 @@ Agent configuration for flows extraction. -`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`. +`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`.
targetHost string - Address of the Ipfix external receiver
+ Address of the IPFIX external receiver

Default:
targetPort integer - Port for the Ipfix external receiver
+ Port for the IPFIX external receiver
true
transport enum - Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`.
+ Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.

Enum: TCP, UDP
ebpf object - `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
+ `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`.
false
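For the eBPF settings referenced in this row, a minimal sketch enabling the optional agent features discussed later in this series (layout assumed from the CRD):

```yaml
spec:
  agent:
    type: eBPF
    ebpf:
      privileged: true  # PacketDrop and DNSTracking need the kernel debug filesystem mount
      features:
        - PacketDrop
        - DNSTracking
        - FlowRTT       # unsupported (*)
```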
@@ -9191,7 +9191,7 @@ Agent configuration for flows extraction. diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index 96e91fbd0..fbda44e7f 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -3563,7 +3563,7 @@ spec: description: Agent configuration for flows extraction. properties: ebpf: - description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.' + description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`.' properties: cacheActiveTimeout: default: 5s @@ -3593,7 +3593,7 @@ spec: type: string type: array features: - description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
' + description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
' items: description: Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency. [Unsupported (*)].
enum: From 3070fc9c432beae6eab74b29464e37c6e3bd7ad2 Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Mon, 6 Nov 2023 10:27:09 -0500 Subject: [PATCH 06/16] Update ebpf enum 2 --- api/v1beta1/flowcollector_types.go | 4 ++-- .../manifests/flows.netobserv.io_flowcollectors.yaml | 10 +++++----- .../crd/bases/flows.netobserv.io_flowcollectors.yaml | 4 ++-- docs/FlowCollector.md | 2 +- docs/flowcollector-flows-netobserv-io-v1beta1.adoc | 4 ++-- hack/cloned.flows.netobserv.io_flowcollectors.yaml | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index 68d6d311b..02fab4431 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -234,10 +234,10 @@ type FlowCollectorEBPF struct { // List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
// - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting // the kernel debug filesystem, so the eBPF pod has to run as privileged. - // If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ // If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
// - `DNSTracking`: enable the DNS tracking feature. This feature requires mounting // the kernel debug filesystem hence the eBPF pod has to run as privileged. - // If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ // If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
// - `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
// +optional Features []AgentFeature `json:"features,omitempty"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index dc5271dc2..7eb7803dc 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -2552,11 +2552,11 @@ spec: might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, - so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run - as privileged. If the `spec.agent.eBPF.privileged` parameter + as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with @@ -5148,7 +5148,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `EBPF`.' + flow reporter when `spec.agent.type` is set to `eBPF`.' properties: cacheActiveTimeout: default: 5s @@ -5205,11 +5205,11 @@ spec: might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, - so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run - as privileged. If the `spec.agent.eBPF.privileged` parameter + as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 6b46d274e..d9690c0c7 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -2538,11 +2538,11 @@ spec: might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, - so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` + so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run - as privileged. If the `spec.agent.eBPF.privileged` parameter + as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index bbe570141..fb7082b1b 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -4556,7 +4556,7 @@ Agent configuration for flows extraction.
diff --git a/docs/flowcollector-flows-netobserv-io-v1beta1.adoc b/docs/flowcollector-flows-netobserv-io-v1beta1.adoc index 31105d960..5028b2c5f 100644 --- a/docs/flowcollector-flows-netobserv-io-v1beta1.adoc +++ b/docs/flowcollector-flows-netobserv-io-v1beta1.adoc @@ -177,8 +177,8 @@ Type:: | `features` | `array (string)` | List of additional features to enable. They are all disabled by default. Enabling additional features may have performance impacts. Possible values are: + - - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported. + - - `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported. + + - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported. + + - `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported. + - `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1. + diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index fbda44e7f..2b30c99d3 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -1769,7 +1769,7 @@ spec: type: string type: array features: - description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
' + description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
' items: description: Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency. [Unsupported (*)].
enum: From 0d51d44b1ca04a35c5e5ef3e57942a6f46bf4add Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau Date: Fri, 10 Nov 2023 10:19:45 +0100 Subject: [PATCH 07/16] pre compute regex feedback --- pkg/conversion/conversion.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/conversion/conversion.go b/pkg/conversion/conversion.go index fd1f91c1e..d67022e3c 100644 --- a/pkg/conversion/conversion.go +++ b/pkg/conversion/conversion.go @@ -20,6 +20,8 @@ var upperPascalExceptions = map[string]string{ "SCRAM-SHA512": "ScramSHA512", } +var upperTokenizer = regexp.MustCompile(`[\-\_]+`) + // MarshalData stores the source object as json data in the destination object annotations map. // It ignores the metadata of the source object. func MarshalData(src metav1.Object, dst metav1.Object) error { @@ -69,7 +71,7 @@ func UpperToPascal(str string) string { // Split on '-' or '_' rune, capitalize first letter of each part and join them var sb strings.Builder - array := regexp.MustCompile(`[\-\_]+`).Split(strings.ToLower(str), -1) + array := upperTokenizer.Split(strings.ToLower(str), -1) for _, s := range array { runes := []rune(s) runes[0] = unicode.ToUpper(runes[0]) From a42d42b63211dbb82a2e44457ae2fbda1f313375 Mon Sep 17 00:00:00 2001 From: Julien Pinsonneau <91894519+jpinsonneau@users.noreply.github.com> Date: Fri, 10 Nov 2023 11:44:57 +0100 Subject: [PATCH 08/16] update config (#488) --- .../config/static-frontend-config.yaml | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml index 2073b7705..9e78658a2 100644 --- a/controllers/consoleplugin/config/static-frontend-config.yaml +++ b/controllers/consoleplugin/config/static-frontend-config.yaml @@ -305,17 +305,17 @@ columns: width: 15 - id: K8S_Object name: Kubernetes Objects - calculated: '[getConcatenatedValue(SrcAddr,SrcPort,SrcK8S_Type,SrcK8S_Namespace,SrcK8S_Name),getConcatenatedValue(DstAddr,DstPort,DstK8S_Type,DstK8S_Namespace,DstK8S_Name)]' + calculated: '[column.SrcK8S_Object,column.DstK8S_Object]' default: false width: 15 - id: K8S_OwnerObject name: Owner Kubernetes Objects - calculated: '[getConcatenatedValue(SrcAddr,SrcPort,SrcK8S_OwnerType,SrcK8S_Namespace,SrcK8S_OwnerName),getConcatenatedValue(DstAddr,DstPort,DstK8S_OwnerType,DstK8S_Namespace,DstK8S_OwnerName)]' + calculated: '[column.SrcK8S_OwnerObject,column.DstK8S_OwnerObject]' default: false width: 15 - id: AddrPort name: IPs & Ports - calculated: '[getConcatenatedValue(SrcAddr,SrcPort),getConcatenatedValue(DstAddr,DstPort)]' + calculated: '[column.SrcAddrPort,column.DstAddrPort]' default: false width: 15 - id: Proto @@ -340,15 +340,15 @@ columns: tooltip: The type of the ICMP message field: IcmpType filter: icmp_type - default: true + default: false width: 10 - id: IcmpCode group: ICMP name: Code tooltip: The code of the ICMP message field: IcmpCode - quickFilter: icmp_code - default: true + filter: icmp_code + default: false width: 10 - id: FlowDirection name: Direction @@ -380,24 +380,20 @@ columns: - id: FlowDuration name: Duration tooltip: Time elapsed between Start Time and End Time. 
+ calculated: substract(TimeFlowEndMs,TimeFlowStartMs) default: false width: 5 - - id: TimeFlowRttMs - name: Flow RTT - tooltip: TCP handshake Round Trip Time - field: TimeFlowRttNs - filter: time_flow_rtt - default: true - width: 5 - id: CollectionTime name: Collection Time tooltip: Reception time of the record by the collector. + calculated: multiply(TimeReceived,1000), field: TimeReceived default: false width: 15 - id: CollectionLatency name: Collection Latency tooltip: Time elapsed between End Time and Collection Time. + calculated: substract(column.CollectionTime,TimeFlowEndMs) default: false width: 5 - id: DNSId @@ -412,6 +408,8 @@ columns: group: DNS name: DNS Latency tooltip: Time elapsed between DNS request and response. + field: DnsLatencyMs + filter: dns_latency default: true width: 5 - id: DNSResponseCode @@ -430,6 +428,13 @@ columns: filter: dns_errno default: false width: 5 + - id: TimeFlowRttMs + name: Flow RTT + tooltip: TCP handshake Round Trip Time + field: TimeFlowRttNs + filter: time_flow_rtt + default: true + width: 5 filters: - id: src_namespace name: Namespace @@ -703,10 +708,6 @@ filters: component: text placeholder: 'E.g: br-ex, ovn-k8s-mp0' hint: Specify a network interface. - - id: dscp - name: DSCP value - component: number - hint: Specify a Differentiated Services Code Point value as integer number. - id: id name: Conversation Id component: text From 3414c8f866356b34514f3c2a509979268d982317 Mon Sep 17 00:00:00 2001 From: Joel Takvorian Date: Fri, 10 Nov 2023 15:57:59 +0100 Subject: [PATCH 09/16] NETOBSERV-1326: NETOBSERV-1231: Drops & RTT metrics (#453) * NETOBSERV-1326: NETOBSERV-1231: Drops & RTT metrics - Added metrics: node_rtt, namespace_rtt, workload_rtt, node_drop_packets_total, node_drop_bytes_total, namespace_drop_packets_total, namespace_drop_bytes_total, workload_drop_packets_total, workload_drop_bytes_total - Add dashboards for drops (not yet for RTT, need to handle histomgrams in dashboards first) * Update CRD doc and tests with added metrics * Set new defaults * Update CRD doc * externalize metrics doc --- api/v1beta1/flowcollector_types.go | 12 ++-- api/v1beta1/flowcollector_webhook_test.go | 4 +- api/v1beta2/flowcollector_types.go | 12 ++-- .../flows.netobserv.io_flowcollectors.yaml | 38 +++++----- .../flows.netobserv.io_flowcollectors.yaml | 38 +++++----- controllers/flowlogspipeline/flp_test.go | 11 +-- docs/FlowCollector.md | 4 +- docs/Metrics.md | 70 ++++++++++--------- ...ned.flows.netobserv.io_flowcollectors.yaml | 4 +- pkg/dashboards/dashboard.go | 42 +++++++---- pkg/dashboards/dashboard_test.go | 16 ++--- pkg/metrics/predefined_metrics.go | 51 +++++++++++++- pkg/metrics/predefined_metrics_test.go | 12 +++- 13 files changed, 195 insertions(+), 119 deletions(-) diff --git a/api/v1beta1/flowcollector_types.go b/api/v1beta1/flowcollector_types.go index 02fab4431..dbf59150e 100644 --- a/api/v1beta1/flowcollector_types.go +++ b/api/v1beta1/flowcollector_types.go @@ -353,13 +353,13 @@ type FLPMetrics struct { // +optional IgnoreTags []string `json:"ignoreTags"` - // `includeList` is a list of metric names to specify which metrics to generate. - // The names correspond to the name in Prometheus, without the prefix. For example, + // `includeList` is a list of metric names to specify which ones to generate. + // The names correspond to the names in Prometheus without the prefix. For example, // `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. 
- // Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, - // `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, - // `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, - // `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`. + // Note that the more metrics you add, the bigger is the impact on Prometheus workload resources. + // Metrics enabled by default are: + // `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when `FlowRTT` feature is enabled). + // More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md // +optional IncludeList *[]string `json:"includeList,omitempty"` diff --git a/api/v1beta1/flowcollector_webhook_test.go b/api/v1beta1/flowcollector_webhook_test.go index f48b7bdc8..95cc0ddf4 100644 --- a/api/v1beta1/flowcollector_webhook_test.go +++ b/api/v1beta1/flowcollector_webhook_test.go @@ -118,14 +118,14 @@ func TestBeta1ConversionRoundtrip_Metrics(t *testing.T) { assert.Equal([]v1beta2.FLPAlert{v1beta2.AlertLokiError}, converted.Spec.Processor.Metrics.DisableAlerts) assert.NotNil(converted.Spec.Processor.Metrics.IncludeList) - assert.Equal([]string{"namespace_egress_packets_total", "namespace_flows_total"}, *converted.Spec.Processor.Metrics.IncludeList) + assert.Equal([]string{"namespace_egress_packets_total", "namespace_flows_total", "namespace_rtt_seconds", "namespace_drop_packets_total"}, *converted.Spec.Processor.Metrics.IncludeList) // Other way var back FlowCollector err = back.ConvertFrom(&converted) assert.NoError(err) // Here, includeList is preserved; it takes precedence over ignoreTags - assert.Equal([]string{"namespace_egress_packets_total", "namespace_flows_total"}, *back.Spec.Processor.Metrics.IncludeList) + assert.Equal([]string{"namespace_egress_packets_total", "namespace_flows_total", "namespace_rtt_seconds", "namespace_drop_packets_total"}, *back.Spec.Processor.Metrics.IncludeList) assert.Equal(initial.Spec.Processor.Metrics.DisableAlerts, back.Spec.Processor.Metrics.DisableAlerts) assert.Equal(initial.Spec.Processor.Metrics.Server, back.Spec.Processor.Metrics.Server) } diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index 4537b1b79..d62e9e1df 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -352,13 +352,13 @@ type FLPMetrics struct { // +optional Server MetricsServerConfig `json:"server,omitempty"` - // `includeList` is a list of metric names to specify which metrics to generate. - // The names correspond to the name in Prometheus, without the prefix. For example, + // `includeList` is a list of metric names to specify which ones to generate. + // The names correspond to the names in Prometheus without the prefix. For example, // `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. 
- // Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, - // `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, - // `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, - // `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`. + // Note that the more metrics you add, the bigger is the impact on Prometheus workload resources. + // Metrics enabled by default are: + // `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when `FlowRTT` feature is enabled). + // More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md // +optional IncludeList *[]string `json:"includeList,omitempty"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 7eb7803dc..4d2425541 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -4827,17 +4827,16 @@ spec: type: array includeList: description: '`includeList` is a list of metric names to specify - which metrics to generate. The names correspond to the name - in Prometheus, without the prefix. For example, `namespace_egress_packets_total` + which ones to generate. The names correspond to the names + in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` - in Prometheus. Available names are: `namespace_egress_bytes_total`, - `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, - `namespace_ingress_packets_total`, `namespace_flows_total`, - `node_egress_bytes_total`, `node_egress_packets_total`, - `node_ingress_bytes_total`, `node_ingress_packets_total`, - `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, - `workload_ingress_bytes_total`, `workload_ingress_packets_total`, - `workload_flows_total`.' + in Prometheus. Note that the more metrics you add, the bigger + is the impact on Prometheus workload resources. Metrics + enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, + `workload_ingress_bytes_total`, `namespace_drop_packets_total` + (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` + (when `FlowRTT` feature is enabled). More information, with + full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' items: type: string type: array @@ -7704,17 +7703,16 @@ spec: type: array includeList: description: '`includeList` is a list of metric names to specify - which metrics to generate. The names correspond to the name - in Prometheus, without the prefix. For example, `namespace_egress_packets_total` + which ones to generate. The names correspond to the names + in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` - in Prometheus. 
Available names are: `namespace_egress_bytes_total`, - `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, - `namespace_ingress_packets_total`, `namespace_flows_total`, - `node_egress_bytes_total`, `node_egress_packets_total`, - `node_ingress_bytes_total`, `node_ingress_packets_total`, - `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, - `workload_ingress_bytes_total`, `workload_ingress_packets_total`, - `workload_flows_total`.' + in Prometheus. Note that the more metrics you add, the bigger + is the impact on Prometheus workload resources. Metrics + enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, + `workload_ingress_bytes_total`, `namespace_drop_packets_total` + (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` + (when `FlowRTT` feature is enabled). More information, with + full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' items: type: string type: array diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index d9690c0c7..3c3224412 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -4813,17 +4813,16 @@ spec: type: array includeList: description: '`includeList` is a list of metric names to specify - which metrics to generate. The names correspond to the name - in Prometheus, without the prefix. For example, `namespace_egress_packets_total` + which ones to generate. The names correspond to the names + in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` - in Prometheus. Available names are: `namespace_egress_bytes_total`, - `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, - `namespace_ingress_packets_total`, `namespace_flows_total`, - `node_egress_bytes_total`, `node_egress_packets_total`, - `node_ingress_bytes_total`, `node_ingress_packets_total`, - `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, - `workload_ingress_bytes_total`, `workload_ingress_packets_total`, - `workload_flows_total`.' + in Prometheus. Note that the more metrics you add, the bigger + is the impact on Prometheus workload resources. Metrics + enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, + `workload_ingress_bytes_total`, `namespace_drop_packets_total` + (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` + (when `FlowRTT` feature is enabled). More information, with + full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' items: type: string type: array @@ -7690,17 +7689,16 @@ spec: type: array includeList: description: '`includeList` is a list of metric names to specify - which metrics to generate. The names correspond to the name - in Prometheus, without the prefix. For example, `namespace_egress_packets_total` + which ones to generate. The names correspond to the names + in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` - in Prometheus. 
Available names are: `namespace_egress_bytes_total`, - `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, - `namespace_ingress_packets_total`, `namespace_flows_total`, - `node_egress_bytes_total`, `node_egress_packets_total`, - `node_ingress_bytes_total`, `node_ingress_packets_total`, - `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, - `workload_ingress_bytes_total`, `workload_ingress_packets_total`, - `workload_flows_total`.' + in Prometheus. Note that the more metrics you add, the bigger + is the impact on Prometheus workload resources. Metrics + enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, + `workload_ingress_bytes_total`, `namespace_drop_packets_total` + (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` + (when `FlowRTT` feature is enabled). More information, with + full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' items: type: string type: array diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 04981295e..8d61e552d 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -915,10 +915,13 @@ func TestMergeMetricsConfiguration_Default(t *testing.T) { jsonStages, _ := json.Marshal(stages) assert.Equal(`[{"name":"ipfix"},{"name":"extract_conntrack","follows":"ipfix"},{"name":"enrich","follows":"extract_conntrack"},{"name":"loki","follows":"enrich"},{"name":"stdout","follows":"enrich"},{"name":"prometheus","follows":"enrich"}]`, string(jsonStages)) names := getSortedMetricsNames(parameters[5].Encode.Prom.Metrics) - assert.Len(names, 3) - assert.Equal("namespace_flows_total", names[0]) - assert.Equal("node_ingress_bytes_total", names[1]) - assert.Equal("workload_ingress_bytes_total", names[2]) + assert.Equal([]string{ + "namespace_drop_packets_total", + "namespace_flows_total", + "namespace_rtt_seconds", + "node_ingress_bytes_total", + "workload_ingress_bytes_total", + }, names) assert.Equal("netobserv_", parameters[5].Encode.Prom.Prefix) } diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index fb7082b1b..a4ec7ede3 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -8556,7 +8556,7 @@ target specifies the target value for the given metric @@ -13717,7 +13717,7 @@ target specifies the target value for the given metric diff --git a/docs/Metrics.md b/docs/Metrics.md index 599af7d74..461fa67d5 100644 --- a/docs/Metrics.md +++ b/docs/Metrics.md @@ -1,35 +1,39 @@ # Metrics in the NetObserv Operator -Configuration of metrics to be collected are stored in the metrics_definitions folder. -These are defined in yaml files according to the format handled by the flp confgenerator. -The flp confgenerator was modified to produce output that can be easily consumed by the NetObserv Operator. -The flp confgenerator was further modified so that it may be called as a module, and provides its output as a data structure returned from a function rather than a yaml file. -All metrics that may be produced are included in the metrics_definitions library, and they are associated with tags. -A parameter is added to the Operator CRD to specify tags of metrics to not produce. - -On each iteration of the Operator, the Operator checks whether the CRD has been modified. -If the CRD has changed, the Operator reconciles the state of the cluster to the specification in the CRD. 
-
-The implementation of the Operator specifies the flp Network Transform enrichment (in particular, kubernetes features).
-The actual metrics to produce are taken from the metrics_definitions, based on the enrichment defined in the Operator.
-The Operator allocates the extract_aggregate and encode_prom Stage structures for the flp pipeline,
-and extract_aggregate and encode_prom entries are filled in using the results from the confgenerator.
-The configuration is placed into a configMap.
-Flp is then deployed using this combined configuration.
-The configuration is not changed during runtime.
-In order to change the configuration (e.g. exclude a different set of metrics), flp must be redeployed.
-
-Note that there are 2 data paths in flp. Data that is ingested is enriched and is then passed directly to Loki.
-In addition, after the enrichment, we derive metrics (from the metrics_definitions), aggregate them, and report to prometheus.
-The metrics_definitions does not impact the data that is sent to Loki.
-
-In the metrics_definitions yaml files, there are tags associated with each metric.
-A user may specify to skip metrics that have a particular tag.
-This is specified by a field in the CRD.
-These tags are then specified to the confgenerator module to produce metrics that are not associated with the specified tag.
-
-## Parameters added to CRD to support metrics
-Note: These parameters may be changed between interations, in which case the Operator redeploys flp.
-- ignoreMetrics (list of tags to specify which metrics to ignore)
-
-
+The NetObserv operator uses [flowlogs-pipeline](https://github.com/netobserv/flowlogs-pipeline/) to generate metrics out of flow logs.
+
+They can be configured in the `FlowCollector` custom resource, via `spec.processor.metrics.includeList`. It is a list of metric names that tells which ones to generate.
+
+The names correspond to the names in Prometheus without their prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus.
+
+Note that the more metrics you add, the bigger the impact on Prometheus workload resources. Some metrics in particular have a higher cardinality, such as all metrics starting with `workload_`, which may stress Prometheus if too many of them are enabled. It is recommended to monitor the impact on Prometheus when adding more metrics.
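+
+For example, a minimal `FlowCollector` excerpt selecting a small set of counters could look like this sketch (the resource name is illustrative; any names from the list below can be combined):
+
+```yaml
+apiVersion: flows.netobserv.io/v1beta2
+kind: FlowCollector
+metadata:
+  name: cluster
+spec:
+  processor:
+    metrics:
+      includeList:
+        - namespace_flows_total
+        - node_ingress_bytes_total
+        - workload_ingress_bytes_total
+```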
+ +Available names are: (names followed by `*` are enabled by default) +- `namespace_egress_bytes_total` +- `namespace_egress_packets_total` +- `namespace_ingress_bytes_total` +- `namespace_ingress_packets_total` +- `namespace_flows_total` `*` +- `node_egress_bytes_total` +- `node_egress_packets_total` +- `node_ingress_bytes_total` `*` +- `node_ingress_packets_total` +- `node_flows_total` +- `workload_egress_bytes_total` +- `workload_egress_packets_total` +- `workload_ingress_bytes_total` `*` +- `workload_ingress_packets_total` +- `workload_flows_total` + +When the `PacketDrop` feature is enabled in `spec.agent.ebpf.features` (with privileged mode), additional metrics are available: +- `namespace_drop_bytes_total` +- `namespace_drop_packets_total` `*` +- `node_drop_bytes_total` +- `node_drop_packets_total` +- `workload_drop_bytes_total` +- `workload_drop_packets_total` + +When the `FlowRTT` feature is enabled in `spec.agent.ebpf.features`, additional metrics are available: +- `namespace_rtt_seconds` `*` +- `node_rtt_seconds` +- `workload_rtt_seconds` diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index 2b30c99d3..b0e432eb0 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -3339,7 +3339,7 @@ spec: type: string type: array includeList: - description: '`includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.' + description: '`includeList` is a list of metric names to specify which ones to generate. The names correspond to the names in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Note that the more metrics you add, the bigger is the impact on Prometheus workload resources. Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when `FlowRTT` feature is enabled). More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' items: type: string type: array @@ -5320,7 +5320,7 @@ spec: type: string type: array includeList: - description: '`includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. 
Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.' + description: '`includeList` is a list of metric names to specify which ones to generate. The names correspond to the names in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Note that the more metrics you add, the bigger is the impact on Prometheus workload resources. Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when `FlowRTT` feature is enabled). More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' items: type: string type: array diff --git a/pkg/dashboards/dashboard.go b/pkg/dashboards/dashboard.go index 6f52c7559..76e72ceed 100644 --- a/pkg/dashboards/dashboard.go +++ b/pkg/dashboards/dashboard.go @@ -16,19 +16,21 @@ type rowInfo struct { // Queries const ( - layerApps = "Applications" - layerInfra = "Infrastructure" - appsFilters1 = `SrcK8S_Namespace!~"|$NETOBSERV_NS|openshift.*"` - appsFilters2 = `SrcK8S_Namespace=~"$NETOBSERV_NS|openshift.*",DstK8S_Namespace!~"|$NETOBSERV_NS|openshift.*"` - infraFilters1 = `SrcK8S_Namespace=~"$NETOBSERV_NS|openshift.*"` - infraFilters2 = `SrcK8S_Namespace!~"$NETOBSERV_NS|openshift.*",DstK8S_Namespace=~"$NETOBSERV_NS|openshift.*"` - metricTagNamespaces = "namespaces" - metricTagNodes = "nodes" - metricTagWorkloads = "workloads" - metricTagIngress = "ingress" - metricTagEgress = "egress" - metricTagBytes = "bytes" - metricTagPackets = "packets" + layerApps = "Applications" + layerInfra = "Infrastructure" + appsFilters1 = `SrcK8S_Namespace!~"|$NETOBSERV_NS|openshift.*"` + appsFilters2 = `SrcK8S_Namespace=~"$NETOBSERV_NS|openshift.*",DstK8S_Namespace!~"|$NETOBSERV_NS|openshift.*"` + infraFilters1 = `SrcK8S_Namespace=~"$NETOBSERV_NS|openshift.*"` + infraFilters2 = `SrcK8S_Namespace!~"$NETOBSERV_NS|openshift.*",DstK8S_Namespace=~"$NETOBSERV_NS|openshift.*"` + metricTagNamespaces = "namespaces" + metricTagNodes = "nodes" + metricTagWorkloads = "workloads" + metricTagIngress = "ingress" + metricTagEgress = "egress" + metricTagBytes = "bytes" + metricTagPackets = "packets" + metricTagDropBytes = "drop_bytes" + metricTagDropPackets = "drop_packets" ) var ( @@ -85,6 +87,7 @@ var ( func init() { for _, group := range []string{metricTagNodes, metricTagNamespaces, metricTagWorkloads} { groupTrimmed := strings.TrimSuffix(group, "s") + // byte/pkt rates for _, vt := range []string{metricTagBytes, metricTagPackets} { for _, dir := range []string{metricTagEgress, metricTagIngress} { rowsInfo = append(rowsInfo, rowInfo{ @@ -95,6 +98,15 @@ func init() { }) } } + // drops + for _, vt := range []string{metricTagDropBytes, metricTagDropPackets} { + rowsInfo = append(rowsInfo, rowInfo{ + metric: fmt.Sprintf("netobserv_%s_%s_total", groupTrimmed, vt), + group: group, + valueType: vt, + }) + } + // TODO: RTT dashboard (after dashboard refactoring for exposed 
metrics; need to handle histogram queries) } } @@ -217,6 +229,10 @@ func flowMetricsRow(netobsNs string, rowInfo rowInfo) string { vt = "byte" case metricTagPackets: vt = "packet" + case metricTagDropBytes: + vt = "drop bytes" + case metricTagDropPackets: + vt = "drop packets" } title := fmt.Sprintf("Top %s rates %s per source and destination %s", vt, verb, rowInfo.group) var panels string diff --git a/pkg/dashboards/dashboard_test.go b/pkg/dashboards/dashboard_test.go index 01430c265..87bf99a09 100644 --- a/pkg/dashboards/dashboard_test.go +++ b/pkg/dashboards/dashboard_test.go @@ -18,7 +18,7 @@ func TestCreateFlowMetricsDashboard_All(t *testing.T) { assert.NoError(err) assert.Equal("NetObserv", d.Title) - assert.Len(d.Rows, 12) + assert.Len(d.Rows, 18) // First row row := 0 @@ -28,8 +28,8 @@ func TestCreateFlowMetricsDashboard_All(t *testing.T) { assert.Len(d.Rows[row].Panels[0].Targets, 1) assert.Contains(d.Rows[row].Panels[0].Targets[0].Expr, "label_replace(label_replace(topk(10,sum(rate(netobserv_node_egress_bytes_total[1m])) by (SrcK8S_HostName, DstK8S_HostName))") - // 6th row - row = 5 + // 8th row + row = 7 assert.Equal("Top byte rates received per source and destination namespaces", d.Rows[row].Title) assert.Len(d.Rows[row].Panels, 2) assert.Equal("Applications", d.Rows[row].Panels[0].Title) @@ -42,8 +42,8 @@ func TestCreateFlowMetricsDashboard_All(t *testing.T) { `label_replace(label_replace(topk(10,sum(rate(netobserv_namespace_ingress_bytes_total{SrcK8S_Namespace=~"netobserv|openshift.*"}[1m]) or rate(netobserv_namespace_ingress_bytes_total{SrcK8S_Namespace!~"netobserv|openshift.*",DstK8S_Namespace=~"netobserv|openshift.*"}[1m])) by (SrcK8S_Namespace, DstK8S_Namespace))`, ) - // 12th row - row = 11 + // 16th row + row = 15 assert.Equal("Top packet rates received per source and destination workloads", d.Rows[row].Title) assert.Len(d.Rows[row].Panels, 2) assert.Equal("Applications", d.Rows[row].Panels[0].Title) @@ -88,7 +88,7 @@ func TestCreateFlowMetricsDashboard_DefaultList(t *testing.T) { assert.NoError(err) assert.Equal("NetObserv", d.Title) - assert.Len(d.Rows, 3) + assert.Len(d.Rows, 4) // First row row := 0 @@ -113,8 +113,8 @@ func TestCreateFlowMetricsDashboard_DefaultList(t *testing.T) { `label_replace(label_replace(topk(10,sum(rate(netobserv_workload_ingress_bytes_total{SrcK8S_Namespace=~"netobserv|openshift.*"}[1m]) or rate(netobserv_workload_ingress_bytes_total{SrcK8S_Namespace!~"netobserv|openshift.*",DstK8S_Namespace=~"netobserv|openshift.*"}[1m])) by (SrcK8S_Namespace, DstK8S_Namespace))`, ) - // 3rd row - row = 2 + // 4th row + row = 3 assert.Equal("Top byte rates received per source and destination workloads", d.Rows[row].Title) assert.Len(d.Rows[row].Panels, 2) assert.Equal("Applications", d.Rows[row].Panels[0].Title) diff --git a/pkg/metrics/predefined_metrics.go b/pkg/metrics/predefined_metrics.go index c5f49e316..92a9f51c4 100644 --- a/pkg/metrics/predefined_metrics.go +++ b/pkg/metrics/predefined_metrics.go @@ -34,7 +34,13 @@ var ( } predefinedMetrics []taggedMetricDefinition // Note that we set default in-code rather than in CRD, in order to keep track of value being unset or set intentionnally in FlowCollector - DefaultIncludeList = []string{"node_ingress_bytes_total", "workload_ingress_bytes_total", "namespace_flows_total"} + DefaultIncludeList = []string{ + "node_ingress_bytes_total", + "workload_ingress_bytes_total", + "namespace_flows_total", + "namespace_drop_packets_total", + "namespace_rtt_seconds", + } // Pre-deprecation default IgnoreTags list 
(1.4) - used before switching to whitelist approach, // to make sure there is no unintended new metrics being collected // Don't add anything here: this is not meant to evolve @@ -61,7 +67,7 @@ func init() { ValueKey: valueField, Filters: []flpapi.PromMetricsFilter{ {Key: "Duplicate", Value: "false"}, - {Key: "FlowDirection", Value: mapDirection[dir], Type: "regex"}, + {Key: "FlowDirection", Value: mapDirection[dir], Type: flpapi.PromFilterRegex}, }, Labels: labels, }, @@ -78,6 +84,47 @@ func init() { }, tags: []string{group, group + "-flows", "flows"}, }) + // RTT metrics + predefinedMetrics = append(predefinedMetrics, taggedMetricDefinition{ + PromMetricsItem: flpapi.PromMetricsItem{ + Name: fmt.Sprintf("%s_rtt_seconds", groupTrimmed), + Type: "histogram", + ValueKey: "TimeFlowRttNs", + Filters: []flpapi.PromMetricsFilter{ + {Key: "TimeFlowRttNs", Type: flpapi.PromFilterPresence}, + }, + Labels: labels, + ValueScale: 1_000_000_000, // ns => s + }, + tags: []string{group, "rtt"}, + }) + // Drops metrics + predefinedMetrics = append(predefinedMetrics, taggedMetricDefinition{ + PromMetricsItem: flpapi.PromMetricsItem{ + Name: fmt.Sprintf("%s_drop_packets_total", groupTrimmed), + Type: "counter", + ValueKey: "PktDropPackets", + Filters: []flpapi.PromMetricsFilter{ + {Key: "Duplicate", Value: "false"}, + {Key: "PktDropPackets", Type: flpapi.PromFilterPresence}, + }, + Labels: labels, + }, + tags: []string{group, tagPackets, "drops"}, + }) + predefinedMetrics = append(predefinedMetrics, taggedMetricDefinition{ + PromMetricsItem: flpapi.PromMetricsItem{ + Name: fmt.Sprintf("%s_drop_bytes_total", groupTrimmed), + Type: "counter", + ValueKey: "PktDropBytes", + Filters: []flpapi.PromMetricsFilter{ + {Key: "Duplicate", Value: "false"}, + {Key: "PktDropBytes", Type: flpapi.PromFilterPresence}, + }, + Labels: labels, + }, + tags: []string{group, tagBytes, "drop"}, + }) } } diff --git a/pkg/metrics/predefined_metrics_test.go b/pkg/metrics/predefined_metrics_test.go index e2a2108ef..eec99b716 100644 --- a/pkg/metrics/predefined_metrics_test.go +++ b/pkg/metrics/predefined_metrics_test.go @@ -11,7 +11,17 @@ func TestIncludeExclude(t *testing.T) { // IgnoreTags set, Include list unset => resolving ignore tags res := GetEnabledNames([]string{"egress", "packets", "flows"}, nil) - assert.Equal([]string{"node_ingress_bytes_total", "namespace_ingress_bytes_total", "workload_ingress_bytes_total"}, res) + assert.Equal([]string{ + "node_ingress_bytes_total", + "node_rtt_seconds", + "node_drop_bytes_total", + "namespace_ingress_bytes_total", + "namespace_rtt_seconds", + "namespace_drop_bytes_total", + "workload_ingress_bytes_total", + "workload_rtt_seconds", + "workload_drop_bytes_total", + }, res) // IgnoreTags set, Include list set => keep include list res = GetEnabledNames([]string{"egress", "packets"}, &[]string{"namespace_flows_total"}) From d522a600c73653d3b0a2cfbbeb4266e305448014 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 10:02:31 +0000 Subject: [PATCH 10/16] Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#490) Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.29.0 to 1.30.0. 
- [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.29.0...v1.30.0) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/onsi/gomega/CHANGELOG.md | 9 +++++++++ vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- vendor/github.com/onsi/gomega/matchers.go | 17 +++++++++++++++++ .../onsi/gomega/matchers/be_false_matcher.go | 13 +++++++++++-- .../onsi/gomega/matchers/be_true_matcher.go | 13 +++++++++++-- vendor/modules.txt | 2 +- 8 files changed, 53 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 885ab57c2..7a8f869e0 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/netobserv/flowlogs-pipeline v0.1.11-0.20231108130854-fac8a619b238 github.com/onsi/ginkgo/v2 v2.13.0 - github.com/onsi/gomega v1.29.0 + github.com/onsi/gomega v1.30.0 github.com/openshift/api v0.0.0-20220112145620-704957ce4980 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 github.com/prometheus/common v0.44.0 diff --git a/go.sum b/go.sum index f5bc459ba..2843b2cf1 100644 --- a/go.sum +++ b/go.sum @@ -170,8 +170,8 @@ github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xl github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/openshift/api v0.0.0-20220112145620-704957ce4980 h1:3tSAAM6kvTTLI7EevJdrX+QHJqQDndWamsudZ1GUFYE= github.com/openshift/api v0.0.0-20220112145620-704957ce4980/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 4fc45f29c..fe72a7b18 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,12 @@ +## 1.30.0 + +### Features +- BeTrueBecause and BeFalseBecause allow for better failure messages [4da4c7f] + +### Maintenance +- Bump actions/checkout from 3 to 4 (#694) [6ca6e97] +- doc: fix type on gleak go doc [f1b8343] + ## 1.29.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index ba082146a..c271a366a 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.29.0" +const GOMEGA_VERSION = "1.30.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. 
If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index cd3f431d2..43f994374 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -1,6 +1,7 @@ package gomega import ( + "fmt" "time" "github.com/google/go-cmp/cmp" @@ -52,15 +53,31 @@ func BeNil() types.GomegaMatcher { } // BeTrue succeeds if actual is true +// +// In general, it's better to use `BeTrueBecause(reason)` to provide a more useful error message if a true check fails. func BeTrue() types.GomegaMatcher { return &matchers.BeTrueMatcher{} } // BeFalse succeeds if actual is false +// +// In general, it's better to use `BeFalseBecause(reason)` to provide a more useful error message if a false check fails. func BeFalse() types.GomegaMatcher { return &matchers.BeFalseMatcher{} } +// BeTrueBecause succeeds if actual is true and displays the provided reason if it is false +// fmt.Sprintf is used to render the reason +func BeTrueBecause(format string, args ...any) types.GomegaMatcher { + return &matchers.BeTrueMatcher{Reason: fmt.Sprintf(format, args...)} +} + +// BeFalseBecause succeeds if actual is false and displays the provided reason if it is true. +// fmt.Sprintf is used to render the reason +func BeFalseBecause(format string, args ...any) types.GomegaMatcher { + return &matchers.BeFalseMatcher{Reason: fmt.Sprintf(format, args...)} +} + // HaveOccurred succeeds if actual is a non-nil error // The typical Go error checking pattern looks like: // diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go index e326c0157..8ee2b1c51 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -9,6 +9,7 @@ import ( ) type BeFalseMatcher struct { + Reason string } func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { @@ -20,9 +21,17 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be false") + if matcher.Reason == "" { + return format.Message(actual, "to be false") + } else { + return matcher.Reason + } } func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be false") + if matcher.Reason == "" { + return format.Message(actual, "not to be false") + } else { + return fmt.Sprintf(`Expected not false but got false\nNegation of "%s" failed`, matcher.Reason) + } } diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go index 60bc1e3fa..3576aac88 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -9,6 +9,7 @@ import ( ) type BeTrueMatcher struct { + Reason string } func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { @@ -20,9 +21,17 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error } func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be true") + if matcher.Reason == "" { + return format.Message(actual, "to be true") + } else { + return matcher.Reason + } } func 
(matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be true") + if matcher.Reason == "" { + return format.Message(actual, "not to be true") + } else { + return fmt.Sprintf(`Expected not true but got true\nNegation of "%s" failed`, matcher.Reason) + } } diff --git a/vendor/modules.txt b/vendor/modules.txt index d7ce65608..b67979534 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -140,7 +140,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.29.0 +# github.com/onsi/gomega v1.30.0 ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format From a33de9b0d1b683f85dc14d79b0a29bd0e738eb4b Mon Sep 17 00:00:00 2001 From: Steven Lee <84493651+stleerh@users.noreply.github.com> Date: Mon, 13 Nov 2023 02:05:12 -0800 Subject: [PATCH 11/16] NETOBSERV-1340: Add operator to "Networking" category in OperatorHub (#451) Co-authored-by: Joel Takvorian --- bundle/manifests/netobserv-operator.clusterserviceversion.yaml | 2 +- config/csv/bases/netobserv-operator.clusterserviceversion.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index e6bf379e8..43fb8d7f6 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -524,7 +524,7 @@ metadata: } ] capabilities: Seamless Upgrades - categories: Monitoring + categories: Monitoring, Networking console.openshift.io/plugins: '["netobserv-plugin"]' containerImage: quay.io/netobserv/network-observability-operator:1.0.4 createdAt: ':created-at:' diff --git a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml index 4616edb1f..95c13a0d2 100644 --- a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml +++ b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml @@ -4,7 +4,7 @@ metadata: annotations: alm-examples: '[]' capabilities: Seamless Upgrades - categories: Monitoring + categories: Monitoring, Networking console.openshift.io/plugins: '["netobserv-plugin"]' containerImage: ':container-image:' createdAt: ':created-at:' From ba957e92ef684b2db37709eb5c8132aaa46f9fc0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:10:33 +0100 Subject: [PATCH 12/16] Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.1 (#491) Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.13.0 to 2.13.1. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.13.0...v2.13.1) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +- go.sum | 14 +-- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 12 +++ .../ginkgo/v2/ginkgo/internal/test_suite.go | 3 +- .../onsi/ginkgo/v2/types/version.go | 2 +- .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 - vendor/golang.org/x/sys/plan9/race.go | 1 - vendor/golang.org/x/sys/plan9/race0.go | 1 - vendor/golang.org/x/sys/plan9/str.go | 1 - vendor/golang.org/x/sys/plan9/syscall.go | 1 - .../x/sys/plan9/zsyscall_plan9_386.go | 1 - .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 - .../x/sys/plan9/zsyscall_plan9_arm.go | 1 - vendor/golang.org/x/sys/unix/aliases.go | 2 - vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 - vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 - .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 - .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 - .../x/sys/unix/asm_openbsd_mips64.s | 1 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 3 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 - vendor/golang.org/x/sys/unix/constants.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/dev_zos.go | 1 - vendor/golang.org/x/sys/unix/dirent.go | 1 - vendor/golang.org/x/sys/unix/endian_big.go | 1 - vendor/golang.org/x/sys/unix/endian_little.go | 1 - vendor/golang.org/x/sys/unix/env_unix.go | 1 - vendor/golang.org/x/sys/unix/epoll_zos.go | 1 - vendor/golang.org/x/sys/unix/fcntl.go | 1 - .../x/sys/unix/fcntl_linux_32bit.go | 1 - vendor/golang.org/x/sys/unix/fdset.go | 1 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 1 - vendor/golang.org/x/sys/unix/gccgo.go | 1 - vendor/golang.org/x/sys/unix/gccgo_c.c | 1 - .../x/sys/unix/gccgo_linux_amd64.go | 1 - vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 - vendor/golang.org/x/sys/unix/ioctl_signed.go | 1 - .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 - vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 - vendor/golang.org/x/sys/unix/mkerrors.sh | 1 - vendor/golang.org/x/sys/unix/mmap_nomremap.go | 1 - vendor/golang.org/x/sys/unix/mremap.go | 1 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 - .../golang.org/x/sys/unix/pledge_openbsd.go | 92 ++++--------------- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 - vendor/golang.org/x/sys/unix/race.go | 1 - vendor/golang.org/x/sys/unix/race0.go | 1 - .../x/sys/unix/readdirent_getdents.go | 1 - .../x/sys/unix/readdirent_getdirentries.go | 1 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 - .../x/sys/unix/sockcmsg_unix_other.go | 1 - vendor/golang.org/x/sys/unix/syscall.go | 1 - vendor/golang.org/x/sys/unix/syscall_aix.go | 4 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - 
.../x/sys/unix/syscall_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 1 - .../x/sys/unix/syscall_darwin_amd64.go | 1 - .../x/sys/unix/syscall_darwin_arm64.go | 1 - .../x/sys/unix/syscall_darwin_libSystem.go | 1 - .../x/sys/unix/syscall_dragonfly_amd64.go | 1 - .../x/sys/unix/syscall_freebsd_386.go | 1 - .../x/sys/unix/syscall_freebsd_amd64.go | 1 - .../x/sys/unix/syscall_freebsd_arm.go | 1 - .../x/sys/unix/syscall_freebsd_arm64.go | 1 - .../x/sys/unix/syscall_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 - .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 - .../golang.org/x/sys/unix/syscall_illumos.go | 1 - vendor/golang.org/x/sys/unix/syscall_linux.go | 5 +- .../x/sys/unix/syscall_linux_386.go | 1 - .../x/sys/unix/syscall_linux_alarm.go | 2 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_amd64_gc.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 1 - .../x/sys/unix/syscall_linux_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 - .../x/sys/unix/syscall_linux_gc_386.go | 1 - .../x/sys/unix/syscall_linux_gc_arm.go | 1 - .../x/sys/unix/syscall_linux_gccgo_386.go | 1 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 - .../x/sys/unix/syscall_linux_loong64.go | 1 - .../x/sys/unix/syscall_linux_mips64x.go | 2 - .../x/sys/unix/syscall_linux_mipsx.go | 2 - .../x/sys/unix/syscall_linux_ppc.go | 1 - .../x/sys/unix/syscall_linux_ppc64x.go | 2 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../x/sys/unix/syscall_netbsd_386.go | 1 - .../x/sys/unix/syscall_netbsd_amd64.go | 1 - .../x/sys/unix/syscall_netbsd_arm.go | 1 - .../x/sys/unix/syscall_netbsd_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_openbsd.go | 14 ++- .../x/sys/unix/syscall_openbsd_386.go | 1 - .../x/sys/unix/syscall_openbsd_amd64.go | 1 - .../x/sys/unix/syscall_openbsd_arm.go | 1 - .../x/sys/unix/syscall_openbsd_arm64.go | 1 - .../x/sys/unix/syscall_openbsd_libc.go | 1 - .../x/sys/unix/syscall_openbsd_ppc64.go | 1 - .../x/sys/unix/syscall_openbsd_riscv64.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 3 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 - vendor/golang.org/x/sys/unix/syscall_unix.go | 1 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 - .../x/sys/unix/syscall_zos_s390x.go | 1 - vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 - vendor/golang.org/x/sys/unix/sysvshm_unix.go | 1 - .../x/sys/unix/sysvshm_unix_other.go | 1 - vendor/golang.org/x/sys/unix/timestruct.go | 1 - .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +++++---- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 - .../x/sys/unix/zerrors_aix_ppc64.go | 1 - .../x/sys/unix/zerrors_darwin_amd64.go | 1 - .../x/sys/unix/zerrors_darwin_arm64.go | 1 - .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_386.go | 1 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_arm.go | 1 - .../x/sys/unix/zerrors_freebsd_arm64.go | 1 - .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 14 ++- .../x/sys/unix/zerrors_linux_386.go | 1 - .../x/sys/unix/zerrors_linux_amd64.go | 1 - .../x/sys/unix/zerrors_linux_arm.go | 1 - .../x/sys/unix/zerrors_linux_arm64.go | 1 - .../x/sys/unix/zerrors_linux_loong64.go | 2 +- .../x/sys/unix/zerrors_linux_mips.go | 1 - .../x/sys/unix/zerrors_linux_mips64.go | 1 - 
.../x/sys/unix/zerrors_linux_mips64le.go | 1 - .../x/sys/unix/zerrors_linux_mipsle.go | 1 - .../x/sys/unix/zerrors_linux_ppc.go | 1 - .../x/sys/unix/zerrors_linux_ppc64.go | 1 - .../x/sys/unix/zerrors_linux_ppc64le.go | 1 - .../x/sys/unix/zerrors_linux_riscv64.go | 4 +- .../x/sys/unix/zerrors_linux_s390x.go | 1 - .../x/sys/unix/zerrors_linux_sparc64.go | 1 - .../x/sys/unix/zerrors_netbsd_386.go | 1 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1 - .../x/sys/unix/zerrors_netbsd_arm.go | 1 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_386.go | 1 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1 - .../x/sys/unix/zerrors_openbsd_arm.go | 1 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_mips64.go | 1 - .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 - .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 - .../x/sys/unix/zerrors_solaris_amd64.go | 1 - .../x/sys/unix/zerrors_zos_s390x.go | 1 - .../x/sys/unix/zptrace_armnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 - .../x/sys/unix/zptrace_x86_linux.go | 2 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 - .../x/sys/unix/zsyscall_darwin_amd64.go | 1 - .../x/sys/unix/zsyscall_darwin_arm64.go | 1 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_386.go | 1 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1 - .../x/sys/unix/zsyscall_illumos_amd64.go | 1 - .../golang.org/x/sys/unix/zsyscall_linux.go | 11 ++- .../x/sys/unix/zsyscall_linux_386.go | 1 - .../x/sys/unix/zsyscall_linux_amd64.go | 1 - .../x/sys/unix/zsyscall_linux_arm.go | 1 - .../x/sys/unix/zsyscall_linux_arm64.go | 1 - .../x/sys/unix/zsyscall_linux_loong64.go | 1 - .../x/sys/unix/zsyscall_linux_mips.go | 1 - .../x/sys/unix/zsyscall_linux_mips64.go | 1 - .../x/sys/unix/zsyscall_linux_mips64le.go | 1 - .../x/sys/unix/zsyscall_linux_mipsle.go | 1 - .../x/sys/unix/zsyscall_linux_ppc.go | 1 - .../x/sys/unix/zsyscall_linux_ppc64.go | 1 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 - .../x/sys/unix/zsyscall_linux_riscv64.go | 1 - .../x/sys/unix/zsyscall_linux_s390x.go | 1 - .../x/sys/unix/zsyscall_linux_sparc64.go | 1 - .../x/sys/unix/zsyscall_netbsd_386.go | 1 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 1 - .../x/sys/unix/zsyscall_openbsd_386.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_386.s | 15 +++ .../x/sys/unix/zsyscall_openbsd_amd64.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_amd64.s | 15 +++ .../x/sys/unix/zsyscall_openbsd_arm.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_arm.s | 15 +++ .../x/sys/unix/zsyscall_openbsd_arm64.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_arm64.s | 15 +++ .../x/sys/unix/zsyscall_openbsd_mips64.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_mips64.s | 15 +++ .../x/sys/unix/zsyscall_openbsd_ppc64.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 18 ++++ .../x/sys/unix/zsyscall_openbsd_riscv64.go | 46 +++++++++- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 15 +++ .../x/sys/unix/zsyscall_solaris_amd64.go | 1 - .../x/sys/unix/zsyscall_zos_s390x.go | 1 - .../x/sys/unix/zsysctl_openbsd_386.go | 1 - 
.../x/sys/unix/zsysctl_openbsd_amd64.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 - .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 - .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_darwin_amd64.go | 1 - .../x/sys/unix/zsysnum_darwin_arm64.go | 1 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_386.go | 1 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 - .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_linux_386.go | 2 +- .../x/sys/unix/zsysnum_linux_amd64.go | 3 +- .../x/sys/unix/zsysnum_linux_arm.go | 2 +- .../x/sys/unix/zsysnum_linux_arm64.go | 2 +- .../x/sys/unix/zsysnum_linux_loong64.go | 2 +- .../x/sys/unix/zsysnum_linux_mips.go | 2 +- .../x/sys/unix/zsysnum_linux_mips64.go | 2 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 2 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 2 +- .../x/sys/unix/zsysnum_linux_s390x.go | 2 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 2 +- .../x/sys/unix/zsysnum_netbsd_386.go | 1 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_386.go | 1 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 - .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_zos_s390x.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 - .../x/sys/unix/ztypes_darwin_amd64.go | 1 - .../x/sys/unix/ztypes_darwin_arm64.go | 1 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_386.go | 1 - .../x/sys/unix/ztypes_freebsd_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_arm.go | 1 - .../x/sys/unix/ztypes_freebsd_arm64.go | 1 - .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 13 ++- .../golang.org/x/sys/unix/ztypes_linux_386.go | 1 - .../x/sys/unix/ztypes_linux_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1 - .../x/sys/unix/ztypes_linux_arm64.go | 1 - .../x/sys/unix/ztypes_linux_loong64.go | 1 - .../x/sys/unix/ztypes_linux_mips.go | 1 - .../x/sys/unix/ztypes_linux_mips64.go | 1 - .../x/sys/unix/ztypes_linux_mips64le.go | 1 - .../x/sys/unix/ztypes_linux_mipsle.go | 1 - .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 1 - .../x/sys/unix/ztypes_linux_ppc64.go | 1 - .../x/sys/unix/ztypes_linux_ppc64le.go | 1 - .../x/sys/unix/ztypes_linux_riscv64.go | 1 - .../x/sys/unix/ztypes_linux_s390x.go | 1 - .../x/sys/unix/ztypes_linux_sparc64.go | 1 - .../x/sys/unix/ztypes_netbsd_386.go | 1 - .../x/sys/unix/ztypes_netbsd_amd64.go | 1 - .../x/sys/unix/ztypes_netbsd_arm.go | 1 - .../x/sys/unix/ztypes_netbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_386.go | 1 - .../x/sys/unix/ztypes_openbsd_amd64.go | 1 - .../x/sys/unix/ztypes_openbsd_arm.go | 1 - .../x/sys/unix/ztypes_openbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_mips64.go | 1 - .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 - .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 - 
.../x/sys/unix/ztypes_solaris_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 1 - vendor/golang.org/x/sys/windows/aliases.go | 1 - vendor/golang.org/x/sys/windows/empty.s | 1 - vendor/golang.org/x/sys/windows/eventlog.go | 1 - vendor/golang.org/x/sys/windows/mksyscall.go | 1 - vendor/golang.org/x/sys/windows/race.go | 1 - vendor/golang.org/x/sys/windows/race0.go | 1 - vendor/golang.org/x/sys/windows/service.go | 1 - vendor/golang.org/x/sys/windows/str.go | 1 - vendor/golang.org/x/sys/windows/syscall.go | 1 - .../x/sys/windows/syscall_windows.go | 4 +- .../golang.org/x/sys/windows/types_windows.go | 28 +++++- .../x/sys/windows/zsyscall_windows.go | 9 ++ vendor/modules.txt | 8 +- 316 files changed, 604 insertions(+), 446 deletions(-) diff --git a/go.mod b/go.mod index 7a8f869e0..a073ec720 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/go-logr/logr v1.3.0 github.com/mitchellh/mapstructure v1.5.0 github.com/netobserv/flowlogs-pipeline v0.1.11-0.20231108130854-fac8a619b238 - github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/ginkgo/v2 v2.13.1 github.com/onsi/gomega v1.30.0 github.com/openshift/api v0.0.0-20220112145620-704957ce4980 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 @@ -66,11 +66,11 @@ require ( golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/sys v0.14.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/tools v0.14.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/go.sum b/go.sum index 2843b2cf1..b4b971a29 100644 --- a/go.sum +++ b/go.sum @@ -165,8 +165,8 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= +github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -238,7 +238,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -285,8 +285,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= @@ -312,8 +312,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index fea67526e..102bb529f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,15 @@ +## 2.13.1 + +### Fixes +- # 1296 fix(precompiled test guite): exec bit check omitted on Windows (#1301) [26eea01] + +### Maintenance +- Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#1291) [7161a9d] +- Bump golang.org/x/sys from 0.13.0 to 0.14.0 (#1295) [7fc7b10] +- Bump golang.org/x/tools from 0.12.0 to 0.14.0 (#1282) [74bbd65] +- Bump github.com/onsi/gomega from 1.27.10 to 1.29.0 (#1290) [9373633] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1286) [6e3cf65] + ## 2.13.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go index 64dcb1b78..f3ae13bb1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -7,6 +7,7 @@ import ( "path" "path/filepath" "regexp" + "runtime" "strings" "github.com/onsi/ginkgo/v2/types" @@ -192,7 +193,7 @@ func precompiledTestSuite(path string) (TestSuite, error) { return 
TestSuite{}, errors.New("this is not a .test binary") } - if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { return TestSuite{}, errors.New("this is not executable") } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index a37f30828..7a794d87a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.13.0" +const VERSION = "2.13.1" diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index c9b69937a..73687de74 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.5 -// +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index 98bf56b73..fb9458218 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.5 -// +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 62377d2ff..c02d9ed33 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && race -// +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index f8da30876..7b15e15f6 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && !race -// +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 55fa8d025..ba3e8ff8a 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 67e5b0115..d631fd664 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 // Package plan9 contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 3f40b9bd7..f780d5c80 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build plan9 && 386 -// +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 0e6a96aa4..7de61065f 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && amd64 -// +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 244c501b7..ea85780f0 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && arm -// +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c104..e7d3df4bd 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2e..269e173ca 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3d..a4fcef0e0 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349a..1e63615c5 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4adc..6496c3100 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a7391..4fd1f54da 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a8489..42f7eb9e4 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019ea..f8902667e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d07..3b4734870 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43c..67e29f317 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d5140..d6ae269ce 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae02760..01e5e253c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 565357288..2abf12f6e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2ce..f84bae712 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c6..f08f62807 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d498934..bdfc024d2 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d5..2e8c99612 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab339..2c394b11e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169c..fab586a2c 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394c1..f949ec547 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e1858..2f67ba86d 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6adb8..a08657890 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
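// A minimal sketch of the build-constraint migration these hunks perform
// (standard Go 1.17 build-constraint semantics; not an excerpt from any one
// vendored file). Under the legacy syntax, separate "+build" lines were
// ANDed together and space-separated terms within one line were ORed, so
// the stacked pair
//
//	// +build linux
//	// +build mips64 mips64le
//
// constrains a file exactly like the single expression this patch keeps:
//
//	//go:build linux && (mips64 || mips64le)
//
// Once a module's minimum Go version is 1.17 or later, the //go:build form
// alone suffices, which is why every "+build" line below is deleted.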
//go:build freebsd -// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 394a3965b..6fb7cb77d 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a998508..d78513461 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad0a..623a5e697 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59a0..bb6a64fe9 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 2499f977b..1ebf11782 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a52026557..1095fd31d 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4ae..b9f0e277b 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
// //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d13..a96da71f4 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go index cedaf7e02..7753fddea 100644 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b991258..58c6bfc70 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808b..13b4acd5c 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f94f..9e83d18cd 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go index e377cc9f4..c8bde601e 100644 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d74..aca5721dd 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index f98a1c542..d468b7b47 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd #include <errno.h> #include <stdint.h> diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a3d..972d61bd7 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a510..848840ae4 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 7def9580e..5b0759bd8 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || solaris -// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1e..20f470b9d 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf76..c8b2a750f 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7eb..cbe24150a 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -663,7 +663,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca0513632..4b68e5978 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa9..fd45fe529 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file.
//go:build linux || netbsd -// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5b..4d0a3430e 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b2..6a09af53e 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. - var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). 
func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7..3f0975f3d 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a01..a4d35db5d 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec5..714d2aae7 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e1322a..4a9f6634c 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
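// A minimal usage sketch for the rewritten Pledge wrappers above
// (illustrative only: the promise strings are examples, and per the new
// pledgeAvailable check the wrappers now require OpenBSD 6.4 or later).
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Restrict this process, and any future execve image, to stdio plus
	// read-only filesystem access.
	if err := unix.Pledge("stdio rpath", "stdio rpath"); err != nil {
		log.Fatal(err)
	}
	// Tighten only the current process further; execpromises are untouched.
	if err := unix.PledgePromises("stdio"); err != nil {
		log.Fatal(err)
	}
}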
//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d6257569..dbd2b6ccb 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c4..130398b6b 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f6..c3a62dbb1 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a5..4a1eab37e 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c8383..5ea74da98 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cdac..67ce6cef2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa95..1fdaa4760 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0f..c87f9a9f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de518..6f328e3a5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9..0eaecf5fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec9963..f36c6707c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641f..16dc69937 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d32120..14bab6b2d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da51004..3967bca77 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
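// A minimal sketch of the address handling changed in the SockaddrUnix hunk
// above (the same fix recurs below for linux and solaris). A leading '@' has
// always selected the abstract namespace; the new `sl > 3` guard additionally
// accepts a name that already begins with a NUL byte, while leaving unnamed
// (empty) socket addresses untouched. Illustrative only: the socket name is
// made up, and abstract sockets are a Linux feature.
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// The '@' is rewritten to a NUL byte and the trailing NUL is not
	// counted, giving the kernel an abstract-namespace address.
	if err := unix.Bind(fd, &unix.SockaddrUnix{Name: "@demo-abstract"}); err != nil {
		log.Fatal(err)
	}
}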
//go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c483..eff19ada2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093f..4f24b517a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0e..ac30759ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b37..aab725ca7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd4673..ba46651f8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e4..df89f9e6b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a8..a863f7052 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e50224..a5e1c10e3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -417,7 +417,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- @@ -2482,3 +2483,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945ea..506dafa7b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6a..38d55641b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce36..d557cf8de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3aa..facdb83b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da2986415..cd2dd797f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689a..cf2ee6c75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7d..ffc4c2b63 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb489..9ebfdcf44 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
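// A minimal usage sketch for the Cachestat wrapper declared in the new //sys
// line above (illustrative only: the path is hypothetical, and the
// cachestat(2) syscall needs Linux 6.5 or later).
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/var/log/syslog", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// A zero Len asks about everything from Off to the end of the file.
	var cstat unix.Cachestat_t
	crange := unix.CachestatRange{Off: 0, Len: 0}
	if err := unix.Cachestat(uint(fd), &crange, &cstat, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("page cache: %d cached, %d dirty\n", cstat.Cache, cstat.Dirty)
}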
//go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fccd..5f2b57c4c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af242..d1a3ad826 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a12299..f2f67423e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec1..3d0e98451 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d25..70963a95a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ffd..c218ebd28 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a3..e6c48500c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa2574..7286a9aa8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee12..6f5a28894 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5d..66f31210d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae7..11d1f1698 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282f..7a5eb5743 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e9..62d8957ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f9..ce6a06885 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd81..d46d689d1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b5..d2882ee04 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -326,4 +321,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdcb..9ddc89f4f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360e..70a3c96ee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d4..265caa87f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99d..ac4fda171 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f41..0a451e6dd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
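// A minimal usage sketch for Getfsstat as refactored above, which now routes
// through the generated getfsstat stub (illustrative only). The usual
// two-call pattern sizes the slice first, then fills it.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// With a nil buffer, Getfsstat only reports how many entries exist.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		log.Fatal(err)
	}
	fss := make([]unix.Statfs_t, n)
	if _, err := unix.Getfsstat(fss, unix.MNT_NOWAIT); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d mounted filesystems\n", len(fss))
}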
//go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139c..30a308cbb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7ff..ea954330f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa134..60c8142d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef81..e02d8ceae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda2705..77081de8c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca58..05c95bccf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707acf..23f39b7af 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041c..d99d05f1b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437f..4fcd38de2 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17b..79a84f18b 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && !ios) || linux -// +build darwin,!ios linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefdb..9eb0db664 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b284..7997b1902 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae77..cb7e598ce 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. 
- var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. +func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f36..e16879396 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b79..2fb219d78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26f..b0e6f5c85 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 143007627..e40fa8524 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a742..bb02aa6c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e44..c0e0f8694 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
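// A minimal usage sketch for the rewritten Unveil wrappers above
// (illustrative only: the paths are examples, and per supportsUnveil these
// wrappers now require OpenBSD 6.4 or later).
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Expose /etc read-only and /tmp read/write/create, then block any
	// further unveil calls from widening the view.
	if err := unix.Unveil("/etc", "r"); err != nil {
		log.Fatal(err)
	}
	if err := unix.Unveil("/tmp", "rwc"); err != nil {
		log.Fatal(err)
	}
	if err := unix.UnveilBlock(); err != nil {
		log.Fatal(err)
	}
}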
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c5138..6c6923906 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3be..dd9163f8e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69def..493a2a793 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2db..8b437b307 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d962..67c02dd57 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479b..9c00cbf51 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -481,10 +480,14 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_LINK = 0x2000 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -521,6 +524,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +780,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,6 +1704,7 @@ const ( KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -2275,6 +2282,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -3461,6 +3469,7 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 @@ -3473,6 +3482,7 @@ const ( XDP_UMEM_REG = 0x4 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a5..4920821cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa5127..a0c1e4112 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cdc..c63985560 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a3..47cc62e25 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c9..27ac4a09e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d34..54694642a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0b..3adb81d75 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f8..2dfe98f0d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd96..f5398f84f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f7..c54f152d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec1..76057dc72 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd7..e0c3725e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1c..18f2813ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f5..11619d4ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c6333..396d994da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d2..130085df4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c08..84769a1a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749f..602ded003 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba192..efc0406ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474b..5a6500f83 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2b..a5aeeb979 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e4..0e9748a72 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe754..4f4449abc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe35..76a363f0f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1e..43ca0cdfd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d403031..b1b8bb200 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a08..d2ddd3176 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506f..4dfd2e051 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca28..586317c78 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. 
//go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f79430..d7c881be7 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e64..2d2de5d29 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. //go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e3..5adc79fb5 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d2331..6ea64a3c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18adc..99ee4399a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae50..b68a78362 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b09..0a87450bf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e9..ccb02f240 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0e..1b40b997b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64a..aad65fc79 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d145..c0096391a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508accac..7664df749 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead4..ae099182c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dcaa..11fd5d45b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 581849197..c3d2d6530 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd19..c698cbc01 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec74..faca7a557 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -2195,3 +2194,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc2..4def3e9fc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf83..fef2bc8ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c7..a9fd76a88 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe9..460065028 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e1..c8987d264 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb7..921f43061 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
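A minimal usage sketch for the new Cachestat wrapper added to zsyscall_linux.go above (not part of the vendored patch): it surfaces the Linux cachestat(2) syscall, which reports page-cache residency for a byte range of an open file. The CachestatRange/Cachestat_t types and the wrapper signature come from this x/sys revision; the file path is a placeholder.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Len == 0 asks cachestat(2) to cover from Off to the end of the file.
	crange := unix.CachestatRange{Off: 0, Len: 0}
	var cstat unix.Cachestat_t
	if err := unix.Cachestat(uint(f.Fd()), &crange, &cstat, 0); err != nil {
		panic(err) // e.g. ENOSYS on kernels without cachestat support
	}
	fmt.Printf("cached=%d dirty=%d writeback=%d\n", cstat.Cache, cstat.Dirty, cstat.Writeback)
}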
//go:build linux && mips -// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e9..44f067829 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718f..e7fa0abf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bbd..8c5125675 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf98..7392fd45e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f40990264..41180434e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc2997..40c6ce7ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed7..2cfe34adb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde32237..61e6f0709 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && s390x -// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65bf..834b84204 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5bac..e91ebc14a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556bab..be28babbc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917a..fb587e826 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2dc..d576438bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b6456..88bfc2885 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 -// +build openbsd,386 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 3dcacd30d..4cbeff171 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc112..b8a67b99a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
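The pledge/unveil wrappers added above are now routed through libc trampolines rather than raw syscalls; the exported x/sys/unix API is unchanged. A minimal OpenBSD-only sandboxing sketch (paths and promise strings are illustrative, not from the patch):

package main

import "golang.org/x/sys/unix"

func main() {
	// Expose only /var/db, read-only; everything else becomes invisible.
	if err := unix.Unveil("/var/db", "r"); err != nil {
		panic(err)
	}
	// Lock the unveil view: no further paths can be exposed.
	if err := unix.UnveilBlock(); err != nil {
		panic(err)
	}
	// Restrict the process to stdio and read-only path syscalls.
	if err := unix.PledgePromises("stdio rpath"); err != nil {
		panic(err)
	}
	// ... remaining work runs inside the sandbox ...
}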
//go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b0..1123f2757 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb328..af50a65c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c92231404..82badae39 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fda..8fb4ff36a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c92..24d7eecb9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de509..f469a83ee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
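The getfsstat trampoline added for each OpenBSD architecture backs the exported unix.Getfsstat, which keeps the usual BSD two-call pattern: query the mount count with a nil buffer, then fill. A sketch, assuming the OpenBSD Statfs_t layout from this revision:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A nil buffer makes getfsstat(2) report only the number of mounts.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		panic(err)
	}
	buf := make([]unix.Statfs_t, n)
	if _, err := unix.Getfsstat(buf, unix.MNT_NOWAIT); err != nil {
		panic(err)
	}
	for _, fs := range buf {
		fmt.Println(unix.ByteSliceToString(fs.F_mntonname[:]))
	}
}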
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bceab..9a498a067 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99ae..c26ca2e1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f76600..1f224aa41 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -801,8 +801,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360f..bcc920dd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b2..87a79c709 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b40189464..829b87feb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4b..94f011238 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e048471..3a58ae819 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf83..dcb7a0eb7 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd8..db5a7bf13 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4a..7be575a77 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a8..d6e3174c6 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e44054470..ee97157d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fce..35c3b91d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9b..5edda7687 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bbf..0dc9e8b4d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0c..308ddf3a1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d96107..418664e3d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b838..34d0b86d7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc106..b71cf45e2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc4274..e32df1c1e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0c..15ad6111f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d3..fcf3ecbdd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4e..f56dc2504 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,6 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37ee..974bf2467 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff81..39a2739e2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f710..cf9c9d77e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3d..10b7362ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a0..cd4d8b4fd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f4987..2c0efca81 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcadc..a72e31d39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b2555..c7d1e3747 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d0..f4d4838c8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e3..b64f0e591 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d64..95711195a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc8..f94e943bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761c..ba0c2bc51 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eba..b2aa8cd49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f1..524a1b1c9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6db..d59b943ac 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952efa..31e771d53 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
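Each Linux arch table above gains SYS_FCHMODAT2 (amd64 also gains SYS_MAP_SHADOW_STACK). fchmodat2 ships here only as a syscall number, so a caller would go through the raw syscall interface; a sketch under that assumption (Linux >= 6.6, ENOSYS otherwise; the helper is hand-rolled for this example, not part of the package):

//go:build linux

package main

import (
	"log"
	"unsafe"

	"golang.org/x/sys/unix"
)

// fchmodat2 is a hand-rolled wrapper: unlike plain fchmodat(2), the kernel
// accepts flags such as AT_SYMLINK_NOFOLLOW for this syscall.
func fchmodat2(dirfd int, path string, mode uint32, flags int) error {
	p, err := unix.BytePtrFromString(path)
	if err != nil {
		return err
	}
	_, _, errno := unix.Syscall6(unix.SYS_FCHMODAT2,
		uintptr(dirfd), uintptr(unsafe.Pointer(p)),
		uintptr(mode), uintptr(flags), 0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	if err := fchmodat2(unix.AT_FDCWD, "/tmp/example", 0o600, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		log.Fatal(err) // ENOSYS on kernels older than 6.6
	}
}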
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 597733813..9fd77c6cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af29189..af10af28c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a97..cc2028af4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef5910..c06dd4415 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01f..9ddbf3e08 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa24..19a6ee413 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0ec..05192a782 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad43..b2e308581 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1d..3e6d57cae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c5..3a219bdce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3d..091d107f3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10ea..28ff4ef74 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b8..30e405bb4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc48337..6cbd094a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b2890..7c03b6ee7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb15..422107ee8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a88..505a12acf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975e..cc986c790 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b42..997bcd55a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -5883,3 +5882,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc5..438a30aff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c688..adceca355 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c2..eeaa00a37 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc4216..6739aa91d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc6..9920ef631 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
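The mkmerge hunk above adds Cachestat_t and CachestatRange, mirroring the cachestat(2) ABI from Linux 6.5 (SYS_CACHESTAT = 451 was already in the tables). A sketch that drives the new types through Syscall6 directly, so as not to assume which high-level wrapper this x/sys version exports:

//go:build linux

package main

import (
	"fmt"
	"log"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	crange := unix.CachestatRange{Off: 0, Len: 0} // Len == 0 means "to end of file"
	var cstat unix.Cachestat_t
	_, _, errno := unix.Syscall6(unix.SYS_CACHESTAT, f.Fd(),
		uintptr(unsafe.Pointer(&crange)), uintptr(unsafe.Pointer(&cstat)),
		0, 0, 0)
	if errno != 0 {
		log.Fatal(errno) // ENOSYS before Linux 6.5
	}
	fmt.Printf("page cache: %d resident, %d dirty\n", cstat.Cache, cstat.Dirty)
}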
//go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7ccd..2923b799a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd332..ce2750ee4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77a..3038811d7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b8757..efc6fed18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840db..9a654b75a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 034874395..40d358e33 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad067047..148c6ceb8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c32..72ba81543 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d025..71e765508 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6c..4abbdb9de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9d..f22e7947d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655d..066a7d83d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a19..439548ec9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151c..16085d3bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b7..afd13a3af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a5479886..5d97f1f9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1f..34871cdc1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266c..5911bceb3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb12..e4f24f3bc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c010..ca50a7930 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a43..d7d7f7902 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c6..14160576d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb3..54f31be63 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea63..ce2d713d6 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows && go1.9 -// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index fdbbbcd31..ba64caca5 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.12 -// +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 // and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645e..6c366955d 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c5..dbcdb090c 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b089c..0f1bdc386 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae4817a..0c78da78b 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index c44a1b963..a9dc6308d 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc01434e..6a4f9ce6a 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb95..e85ed6b9c 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57ca..fb6cfd046 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -233,6 +233,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -969,7 +970,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c85..359780f6a 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de4..db6282e00 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -253,6 +253,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -2185,6 +2186,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if 
r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) diff --git a/vendor/modules.txt b/vendor/modules.txt index b67979534..d6b5a9dfc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -118,7 +118,7 @@ github.com/mwitkow/go-conntrack github.com/netobserv/flowlogs-pipeline/pkg/api github.com/netobserv/flowlogs-pipeline/pkg/config github.com/netobserv/flowlogs-pipeline/pkg/utils -# github.com/onsi/ginkgo/v2 v2.13.0 +# github.com/onsi/ginkgo/v2 v2.13.1 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -236,8 +236,8 @@ golang.org/x/net/trace golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sys v0.13.0 -## explicit; go 1.17 +# golang.org/x/sys v0.14.0 +## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows @@ -269,7 +269,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.13.0 +# golang.org/x/tools v0.14.0 ## explicit; go 1.18 golang.org/x/tools/go/ast/inspector golang.org/x/tools/internal/typeparams From 6dbf85ae6136f155776337177783264d10dab68d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:11:18 +0100 Subject: [PATCH 13/16] Bump github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring (#492) Bumps [github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring](https://github.com/prometheus-operator/prometheus-operator) from 0.68.0 to 0.69.1. - [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases) - [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.68.0...v0.69.1) --- updated-dependencies: - dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring dependency-type: direct:production update-type: version-update:semver-minor ... 
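For the Windows additions a few hunks up: GetFileTime is the read-side counterpart of the long-standing SetFileTime, and the new TCP_*/UDP_* constants mirror the ws2ipdef.h socket options (usable with SetsockoptInt). A round-trip sketch over a placeholder path, rewriting only the last-write time:

//go:build windows

package main

import (
	"log"
	"time"

	"golang.org/x/sys/windows"
)

func main() {
	path, err := windows.UTF16PtrFromString(`C:\Temp\example.txt`) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	h, err := windows.CreateFile(path, windows.GENERIC_READ|windows.GENERIC_WRITE,
		0, nil, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseHandle(h)

	var ctime, atime, wtime windows.Filetime
	if err := windows.GetFileTime(h, &ctime, &atime, &wtime); err != nil {
		log.Fatal(err)
	}
	// Touch only the last-write time; creation and access times round-trip.
	wtime = windows.NsecToFiletime(time.Now().UnixNano())
	if err := windows.SetFileTime(h, &ctime, &atime, &wtime); err != nil {
		log.Fatal(err)
	}
}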
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- .../apis/monitoring/v1/alertmanager_types.go | 14 +++- .../apis/monitoring/v1/prometheus_types.go | 78 +++++++++++++++---- .../pkg/apis/monitoring/v1/thanos_types.go | 13 +++- .../monitoring/v1/zz_generated.deepcopy.go | 46 +++++++++++ vendor/modules.txt | 2 +- 7 files changed, 134 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index a073ec720..229f13dd8 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/onsi/ginkgo/v2 v2.13.1 github.com/onsi/gomega v1.30.0 github.com/openshift/api v0.0.0-20220112145620-704957ce4980 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1 github.com/prometheus/common v0.44.0 github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.26.0 diff --git a/go.sum b/go.sum index b4b971a29..be6e05ca7 100644 --- a/go.sum +++ b/go.sum @@ -181,8 +181,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 h1:yl9ceUSUBo9woQIO+8eoWpcxZkdZgm89g+rVvu37TUw= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0/go.mod h1:9Uuu3pEU2jB8PwuqkHvegQ0HV/BlZRJUyfTYAqfdVF8= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1 h1:hOnp+1FLBm+ifsyiRbunmfSs99jKAq+Tr5elCmo5l5U= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1/go.mod h1:JtflYMUMay9HGil4aRg+dSj6X6mngtuBJf/ULOCxbxI= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index 9bcbf3207..78815919f 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -60,7 +60,15 @@ func (l *Alertmanager) DeepCopyObject() runtime.Object { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerSpec struct { - // PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. + // PodMetadata configures labels and annotations which are propagated to the Alertmanager pods. + // + // The following items are reserved and cannot be overridden: + // * "alertmanager" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/instance" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". 
+ // * "app.kubernetes.io/name" label, set to "alertmanager". + // * "app.kubernetes.io/version" label, set to the Alertmanager version. + // * "kubectl.kubernetes.io/default-container" annotation, set to "alertmanager". PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the @@ -115,10 +123,10 @@ type AlertmanagerSpec struct { // receiver (effectively dropping alert notifications). ConfigSecret string `json:"configSecret,omitempty"` // Log level for Alertmanager to be configured with. - //+kubebuilder:validation:Enum="";debug;info;warn;error + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` // Log format for Alertmanager to be configured with. - //+kubebuilder:validation:Enum="";logfmt;json + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` // Size is the expected size of the alertmanager cluster. The controller will // eventually make the size of the running cluster equal to the expected diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index 7dc956109..fff9bf06d 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -60,6 +60,16 @@ func (l *Prometheus) GetStatus() PrometheusStatus { // +k8s:deepcopy-gen=true type CommonPrometheusFields struct { // PodMetadata configures labels and annotations which are propagated to the Prometheus pods. + // + // The following items are reserved and cannot be overridden: + // * "prometheus" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/instance" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "prometheus". + // * "app.kubernetes.io/version" label, set to the Prometheus version. + // * "operator.prometheus.io/name" label, set to the name of the Prometheus object. + // * "operator.prometheus.io/shard" label, set to the shard number of the Prometheus object. + // * "kubectl.kubernetes.io/default-container" annotation, set to "prometheus". PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // ServiceMonitors to be selected for target discovery. An empty label @@ -205,10 +215,10 @@ type CommonPrometheusFields struct { PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` // Log level for Prometheus and the config-reloader sidecar. - //+kubebuilder:validation:Enum="";debug;info;warn;error + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` // Log format for Log level for Prometheus and the config-reloader sidecar. - //+kubebuilder:validation:Enum="";logfmt;json + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` // Interval between consecutive scrapes. @@ -302,7 +312,7 @@ type CommonPrometheusFields struct { // +optional Tolerations []v1.Toleration `json:"tolerations,omitempty"` // Defines the pod's topology spread constraints if specified. 
- //+optional + // +optional TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` // Defines the list of remote write configurations. @@ -407,7 +417,7 @@ type CommonPrometheusFields struct { // When true, `spec.namespaceSelector` from all PodMonitor, ServiceMonitor // and Probe objects will be ignored. They will only discover targets // within the namespace of the PodMonitor, ServiceMonitor and Probe - // objec. + // object. IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` // When not empty, a label will be added to @@ -787,7 +797,7 @@ type PrometheusSpec struct { type PrometheusTracingConfig struct { // Client used to export the traces. Supported values are `http` or `grpc`. - //+kubebuilder:validation:Enum=http;grpc + // +kubebuilder:validation:Enum=http;grpc // +optional ClientType *string `json:"clientType"` @@ -809,7 +819,7 @@ type PrometheusTracingConfig struct { Headers map[string]string `json:"headers"` // Compression key for supported compression types. The only supported value is `gzip`. - //+kubebuilder:validation:Enum=gzip + // +kubebuilder:validation:Enum=gzip // +optional Compression *string `json:"compression"` @@ -1023,10 +1033,10 @@ type ThanosSpec struct { GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` // Log level for the Thanos sidecar. - //+kubebuilder:validation:Enum="";debug;info;warn;error + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` // Log format for the Thanos sidecar. - //+kubebuilder:validation:Enum="";logfmt;json + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` // Defines the start of time range limit served by the Thanos sidecar's StoreAPI. @@ -1121,12 +1131,12 @@ type RemoteWriteSpec struct { // // It requires Prometheus >= v2.27.0. // - // Cannot be set at the same time as `sigv4`, `authorization`, or `basicAuth`. + // Cannot be set at the same time as `sigv4`, `authorization`, `basicAuth`, or `azureAd`. // +optional OAuth2 *OAuth2 `json:"oauth2,omitempty"` // BasicAuth configuration for the URL. // - // Cannot be set at the same time as `sigv4`, `authorization`, or `oauth2`. + // Cannot be set at the same time as `sigv4`, `authorization`, `oauth2`, or `azureAd`. // // +optional BasicAuth *BasicAuth `json:"basicAuth,omitempty"` @@ -1138,7 +1148,7 @@ type RemoteWriteSpec struct { // // It requires Prometheus >= v2.26.0. // - // Cannot be set at the same time as `sigv4`, `basicAuth`, or `oauth2`. + // Cannot be set at the same time as `sigv4`, `basicAuth`, `oauth2`, or `azureAd`. // // +optional Authorization *Authorization `json:"authorization,omitempty"` @@ -1146,11 +1156,20 @@ type RemoteWriteSpec struct { // // It requires Prometheus >= v2.26.0. // - // Cannot be set at the same time as `authorization`, `basicAuth`, or `oauth2`. + // Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `azureAd`. // // +optional Sigv4 *Sigv4 `json:"sigv4,omitempty"` + // AzureAD for the URL. + // + // It requires Prometheus >= v2.45.0. + // + // Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `sigv4`. + // + // +optional + AzureAD *AzureAD `json:"azureAd,omitempty"` + // *Warning: this field shouldn't be used because the token value appears // in clear-text. 
Prefer using `authorization`.* // @@ -1219,6 +1238,26 @@ type Sigv4 struct { RoleArn string `json:"roleArn,omitempty"` } +// AzureAD defines the configuration for remote write's azuread parameters. +// +k8s:openapi-gen=true +type AzureAD struct { + // The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. + // +kubebuilder:validation:Enum=AzureChina;AzureGovernment;AzurePublic + // +optional + Cloud *string `json:"cloud,omitempty"` + // ManagedIdentity defines the Azure User-assigned Managed identity. + // +required + ManagedIdentity ManagedIdentity `json:"managedIdentity"` +} + +// ManagedIdentity defines the Azure User-assigned Managed identity. +// +k8s:openapi-gen=true +type ManagedIdentity struct { + // The client id + // +required + ClientID string `json:"clientId"` +} + // RemoteReadSpec defines the configuration for Prometheus to read back samples // from a remote endpoint. // +k8s:openapi-gen=true @@ -1428,25 +1467,34 @@ type AlertmanagerEndpoints struct { // BasicAuth configuration for Alertmanager. // - // Cannot be set at the same time as `bearerTokenFile`, or `authorization`. + // Cannot be set at the same time as `bearerTokenFile`, `authorization` or `sigv4`. // // +optional BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // File to read bearer token for Alertmanager. // - // Cannot be set at the same time as `basicAuth`, or `authorization`. + // Cannot be set at the same time as `basicAuth`, `authorization`, or `sigv4`. // // *Deprecated: this will be removed in a future release. Prefer using `authorization`.* BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Authorization section for Alertmanager. // - // Cannot be set at the same time as `basicAuth`, or `bearerTokenFile`. + // Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `sigv4`. // // +optional Authorization *SafeAuthorization `json:"authorization,omitempty"` + // Sigv4 allows to configures AWS's Signature Verification 4 for the URL. + // + // It requires Prometheus >= v2.48.0. + // + // Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `authorization`. + // + // +optional + Sigv4 *Sigv4 `json:"sigv4,omitempty"` + // Version of the Alertmanager API that Prometheus uses to send alerts. // It can be "v1" or "v2". APIVersion string `json:"apiVersion,omitempty"` diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go index 6310b623b..c155df068 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -68,7 +68,14 @@ type ThanosRulerList struct { type ThanosRulerSpec struct { // Version of Thanos to be deployed. Version string `json:"version,omitempty"` - // PodMetadata contains Labels and Annotations gets propagated to the thanos ruler pods. + // PodMetadata configures labels and annotations which are propagated to the ThanosRuler pods. + // + // The following items are reserved and cannot be overridden: + // * "app.kubernetes.io/name" label, set to "thanos-ruler". + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/instance" label, set to the name of the ThanosRuler instance. + // * "thanos-ruler" label, set to the name of the ThanosRuler instance. 
+ // * "kubectl.kubernetes.io/default-container" annotation, set to "thanos-ruler". PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Thanos container image URL. Image string `json:"image,omitempty"` @@ -158,10 +165,10 @@ type ThanosRulerSpec struct { // Deprecated: use excludedFromEnforcement instead. PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` // Log level for ThanosRuler to be configured with. - //+kubebuilder:validation:Enum="";debug;info;warn;error + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` // Log format for ThanosRuler to be configured with. - //+kubebuilder:validation:Enum="";logfmt;json + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` // Port name used for the pods and governing service. // Defaults to `web`. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go index 5f85b15fc..101879d8e 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -157,6 +157,11 @@ func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { *out = new(SafeAuthorization) (*in).DeepCopyInto(*out) } + if in.Sigv4 != nil { + in, out := &in.Sigv4, &out.Sigv4 + *out = new(Sigv4) + (*in).DeepCopyInto(*out) + } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout *out = new(Duration) @@ -525,6 +530,27 @@ func (in *AuthorizationValidationError) DeepCopy() *AuthorizationValidationError return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureAD) DeepCopyInto(out *AzureAD) { + *out = *in + if in.Cloud != nil { + in, out := &in.Cloud, &out.Cloud + *out = new(string) + **out = **in + } + out.ManagedIdentity = in.ManagedIdentity +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAD. +func (in *AzureAD) DeepCopy() *AzureAD { + if in == nil { + return nil + } + out := new(AzureAD) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { *out = *in @@ -1170,6 +1196,21 @@ func (in *HostPort) DeepCopy() *HostPort { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedIdentity) DeepCopyInto(out *ManagedIdentity) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentity. +func (in *ManagedIdentity) DeepCopy() *ManagedIdentity { + if in == nil { + return nil + } + out := new(ManagedIdentity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MetadataConfig) DeepCopyInto(out *MetadataConfig) { *out = *in @@ -2236,6 +2277,11 @@ func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) { *out = new(Sigv4) (*in).DeepCopyInto(*out) } + if in.AzureAD != nil { + in, out := &in.AzureAD, &out.AzureAD + *out = new(AzureAD) + (*in).DeepCopyInto(*out) + } if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig *out = new(TLSConfig) diff --git a/vendor/modules.txt b/vendor/modules.txt index d6b5a9dfc..90adc43c5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -164,7 +164,7 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 +# github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.69.1 ## explicit; go 1.17 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 From 38dba7e0e4149c1d23f281844a8e8bf6ea1c18cc Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Fri, 3 Nov 2023 17:09:52 -0400 Subject: [PATCH 14/16] Update camelCase for other fields --- api/v1alpha1/flowcollector_webhook.go | 10 +- api/v1alpha1/zz_generated.conversion.go | 162 +++--------------- api/v1beta1/zz_generated.conversion.go | 158 +++-------------- api/v1beta2/flowcollector_types.go | 88 +++++----- api/v1beta2/zz_generated.deepcopy.go | 28 +-- .../flows.netobserv.io_flowcollectors.yaml | 46 ++--- ...observ-operator.clusterserviceversion.yaml | 10 +- .../flows.netobserv.io_flowcollectors.yaml | 46 ++--- config/descriptions/ocp.md | 4 +- config/descriptions/upstream.md | 4 +- config/manager/kustomization.yaml | 4 +- .../samples/flows_v1beta2_flowcollector.yaml | 4 +- .../consoleplugin/consoleplugin_objects.go | 8 +- controllers/ebpf/agent_controller.go | 58 +++---- .../ebpf/internal/permissions/permissions.go | 6 +- controllers/flowcollector_controller.go | 6 +- .../flowcollector_controller_ebpf_test.go | 12 +- .../flowcollector_controller_iso_test.go | 6 +- controllers/flowcollector_controller_test.go | 14 +- .../flowlogspipeline/flp_common_objects.go | 18 +- .../flowlogspipeline/flp_ingest_objects.go | 2 +- .../flowlogspipeline/flp_ingest_reconciler.go | 2 +- .../flowlogspipeline/flp_monolith_objects.go | 2 +- controllers/flowlogspipeline/flp_test.go | 4 +- .../flowlogspipeline/flp_transfo_objects.go | 2 +- controllers/ovs/flowsconfig_cno_reconciler.go | 10 +- .../ovs/flowsconfig_ovnk_reconciler.go | 18 +- controllers/ovs/flowsconfig_types.go | 4 +- docs/FlowCollector.md | 36 ++-- ...ned.flows.netobserv.io_flowcollectors.yaml | 32 ++-- pkg/helper/flowcollector.go | 24 +-- 31 files changed, 300 insertions(+), 528 deletions(-) diff --git a/api/v1alpha1/flowcollector_webhook.go b/api/v1alpha1/flowcollector_webhook.go index a82f1de33..7433ae370 100644 --- a/api/v1alpha1/flowcollector_webhook.go +++ b/api/v1alpha1/flowcollector_webhook.go @@ -46,9 +46,9 @@ func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { } // Agent - if restored.Spec.Agent.EBPF.Features != nil { - dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) - copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) + if restored.Spec.Agent.Ebpf.Features != nil { + dst.Spec.Agent.Ebpf.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.Ebpf.Features)) + copy(dst.Spec.Agent.Ebpf.Features, restored.Spec.Agent.Ebpf.Features) } // 
Processor @@ -169,8 +169,8 @@ func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsole // This function need to be manually created because conversion-gen not able to create it intentionally because // we have new defined fields in v1beta1 not in v1alpha1 // nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) +func Convert_v1beta2_FlowCollectorEbpf_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEbpf, out *FlowCollectorEBPF, s apiconversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorEbpf_To_v1alpha1_FlowCollectorEBPF(in, out, s) } // This function need to be manually created because conversion-gen not able to create it intentionally because diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index d9afc6552..688133a91 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -113,33 +113,28 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), 
(*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) }); err != nil { return err } @@ -258,16 +253,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) }); err != nil { @@ -505,24 +490,16 @@ func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCol } func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - out.Type = v1beta2.FlowCollectorAgentType(in.Type) - if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + out.Type = in.Type + // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type + // WARNING: in.EBPF requires manual conversion: does not exist in peer-type return nil } func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - out.Type = string(in.Type) - if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + 
out.Type = in.Type + // WARNING: in.Ipfix requires manual conversion: does not exist in peer-type + // WARNING: in.Ebpf requires manual conversion: does not exist in peer-type return nil } @@ -574,52 +551,12 @@ func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorCon return nil } -func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1alpha1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta2_DebugConfig_To_v1alpha1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - // WARNING: in.Features requires manual conversion: does not exist in peer-type - return nil -} - func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { out.Type = v1beta2.ExporterType(in.Type) if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -630,7 +567,7 @@ func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIpfixReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -722,66 +659,9 @@ func autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1bet return nil } -func 
autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in, out, s) +// Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA is an autogenerated conversion function. +func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s) } func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 27298ef24..9d3dd7ff8 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -118,33 +118,28 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) + if err := s.AddGeneratedConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) }); err != nil { return err } @@ -503,24 +498,16 @@ func Convert_v1beta2_FlowCollector_To_v1beta1_FlowCollector(in *v1beta2.FlowColl } func autoConvert_v1beta1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - out.Type = v1beta2.FlowCollectorAgentType(in.Type) - if err := Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + out.Type = in.Type + // WARNING: in.IPFIX requires manual conversion: does not exist in peer-type + // WARNING: in.EBPF requires manual conversion: does not exist in peer-type return nil } func autoConvert_v1beta2_FlowCollectorAgent_To_v1beta1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - out.Type = string(in.Type) - if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } + out.Type = in.Type + // WARNING: in.Ipfix requires manual conversion: does not exist in peer-type + // WARNING: in.Ebpf requires manual conversion: does not exist in peer-type return nil } @@ -570,58 +557,12 @@ func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsoleP return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1beta1_FlowCollectorConsolePlugin(in, out, s) } -func autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta1_DebugConfig_To_v1beta2_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - out.Features = 
*(*[]v1beta2.AgentFeature)(unsafe.Pointer(&in.Features)) - return nil -} - -// Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - if err := Convert_v1beta2_DebugConfig_To_v1beta1_DebugConfig(&in.Debug, &out.Debug, s); err != nil { - return err - } - out.Features = *(*[]AgentFeature)(unsafe.Pointer(&in.Features)) - return nil -} - -// Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorEBPF_To_v1beta1_FlowCollectorEBPF(in, out, s) -} - func autoConvert_v1beta1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { out.Type = v1beta2.ExporterType(in.Type) if err := Convert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -632,7 +573,7 @@ func autoConvert_v1beta2_FlowCollectorExporter_To_v1beta1_FlowCollectorExporter( if err := Convert_v1beta2_FlowCollectorKafka_To_v1beta1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { return err } - if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { + if err := Convert_v1beta2_FlowCollectorIpfixReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { return err } return nil @@ -712,66 +653,9 @@ func autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta return nil } -func autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// 
Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1beta1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta2_OVNKubernetesConfig_To_v1beta1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1beta1_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1beta1_FlowCollectorIPFIXReceiver(in, out, s) +// Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA is an autogenerated conversion function. 
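// The generated IPFIXReceiver conversions removed above still have a caller: the
// exporter conversion now references
// Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver.
// A plausible hand-written replacement (a sketch; the actual helper is not shown in
// this patch) copies the same three fields the generated code used to:
func Convert_v1beta1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIpfixReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIpfixReceiver, s conversion.Scope) error {
	out.TargetHost = in.TargetHost // address of the external IPFIX receiver
	out.TargetPort = in.TargetPort // receiver port
	out.Transport = in.Transport   // "TCP" or "UDP", defaults to "TCP"
	return nil
}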
+func Convert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { + return autoConvert_v1beta2_FlowCollectorHPA_To_v1beta1_FlowCollectorHPA(in, out, s) } func autoConvert_v1beta1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { diff --git a/api/v1beta2/flowcollector_types.go b/api/v1beta2/flowcollector_types.go index d62e9e1df..fdc079102 100644 --- a/api/v1beta2/flowcollector_types.go +++ b/api/v1beta2/flowcollector_types.go @@ -26,8 +26,10 @@ import ( type FlowCollectorDeploymentModel string const ( - DeploymentModelDirect FlowCollectorDeploymentModel = "Direct" - DeploymentModelKafka FlowCollectorDeploymentModel = "Kafka" + AgentIpfix = "Ipfix" + AgentEbpf = "Ebpf" + DeploymentModelDirect = "Direct" + DeploymentModelKafka = "Kafka" ) // Please notice that the FlowCollectorSpec's properties MUST redefine one of the default @@ -70,7 +72,7 @@ type FlowCollectorSpec struct { // +unionDiscriminator // +kubebuilder:validation:Enum:="Direct";"Kafka" // +kubebuilder:default:=Direct - DeploymentModel FlowCollectorDeploymentModel `json:"deploymentModel,omitempty"` + DeploymentModel string `json:"deploymentModel,omitempty"` // Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`. // +optional @@ -94,30 +96,30 @@ const ( // +union type FlowCollectorAgent struct { // `type` selects the flows tracing agent. Possible values are:
- // - `eBPF` (default) to use NetObserv eBPF agent.
- // - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- // `eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. - // `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, + // - `Ebpf` (default) to use NetObserv eBPF agent.
+ // - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
+ // `Ebpf` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. + // `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, // but they would require manual configuration). // +unionDiscriminator - // +kubebuilder:validation:Enum:="eBPF";"IPFIX" - // +kubebuilder:default:=eBPF - Type FlowCollectorAgentType `json:"type,omitempty"` + // +kubebuilder:validation:Enum:="Ebpf";"Ipfix" + // +kubebuilder:default:=Ebpf + Type string `json:"type,omitempty"` - // `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` - // is set to `IPFIX`. + // `ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` + // is set to `Ipfix`. // +optional - IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` + Ipfix FlowCollectorIpfix `json:"ipfix,omitempty"` // `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` - // is set to `eBPF`. + // is set to `Ebpf`. // +optional - EBPF FlowCollectorEBPF `json:"ebpf,omitempty"` + Ebpf FlowCollectorEbpf `json:"ebpf,omitempty"` } -// `FlowCollectorIPFIX` defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the +// `FlowCollectorIpfix` defines a FlowCollector that uses Ipfix on OVN-Kubernetes to collect the // flows information -type FlowCollectorIPFIX struct { +type FlowCollectorIpfix struct { // Important: Run "make generate" to regenerate code after modifying this file //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ @@ -135,12 +137,12 @@ type FlowCollectorIPFIX struct { // `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. // To ensure cluster stability, it is not possible to set a value below 2. // If you really want to sample every packet, which might impact the cluster stability, - // refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX. + // refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix. Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"` //+kubebuilder:default:=false - // `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. - // It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. + // `forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. + // It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. // If you REALLY want to do that, set this flag to `true`. Use at your own risk. // When it is set to `true`, the value of `sampling` is ignored. ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"` @@ -148,7 +150,7 @@ type FlowCollectorIPFIX struct { // `clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available. ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"` - // `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. + // `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. 
When using OpenShift, refer to the `clusterNetworkOperator` property instead. OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"` } @@ -165,8 +167,8 @@ const ( FlowRTT AgentFeature = "FlowRTT" ) -// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information -type FlowCollectorEBPF struct { +// `FlowCollectorEbpf` defines a FlowCollector that uses eBPF to collect the flows information +type FlowCollectorEbpf struct { // Important: Run "make generate" to regenerate code after modifying this file //+kubebuilder:validation:Enum=IfNotPresent;Always;Never @@ -271,21 +273,27 @@ type FlowCollectorKafka struct { SASL SASLConfig `json:"sasl"` } -type FlowCollectorIPFIXReceiver struct { +type FlowCollectorIpfixReceiver struct { //+kubebuilder:default:="" - // Address of the IPFIX external receiver + // Address of the Ipfix external receiver TargetHost string `json:"targetHost"` - // Port for the IPFIX external receiver + // Port for the Ipfix external receiver TargetPort int `json:"targetPort"` - // Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. + // Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`. // +unionDiscriminator // +kubebuilder:validation:Enum:="TCP";"UDP" // +optional Transport string `json:"transport,omitempty"` } +const ( + ServerTLSDisabled = "Disabled" + ServerTLSProvided = "Provided" + ServerTLSAuto = "Auto" +) + type ServerTLSConfigType string const ( @@ -492,8 +500,8 @@ type FlowCollectorFLP struct { type HPAStatus string const ( - HPAStatusDisabled HPAStatus = "Disabled" - HPAStatusEnabled HPAStatus = "Enabled" + HPAStatusDisabled = "Disabled" + HPAStatusEnabled = "Enabled" ) type FlowCollectorHPA struct { @@ -502,7 +510,7 @@ type FlowCollectorHPA struct { // `status` describes the desired status regarding deploying an horizontal pod autoscaler.
// - `Disabled` does not deploy a horizontal pod autoscaler.<br>
// - `Enabled` deploys a horizontal pod autoscaler.<br>
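// For illustration only (not from the patch): with HPAStatus collapsed to plain
// strings, callers set the field with the untyped constants, for example:
//
//	autoscaler := v1beta2.FlowCollectorHPA{Status: v1beta2.HPAStatusEnabled} // "Enabled"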
- Status HPAStatus `json:"status,omitempty"` + Status string `json:"status,omitempty"` // `minReplicas` is the lower limit for the number of replicas to which the autoscaler // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the @@ -525,9 +533,9 @@ type FlowCollectorHPA struct { type LokiAuthToken string const ( - LokiAuthDisabled LokiAuthToken = "Disabled" - LokiAuthUseHostToken LokiAuthToken = "Host" - LokiAuthForwardUserToken LokiAuthToken = "Forward" + LokiAuthDisabled = "Disabled" + LokiAuthUseHostToken = "Host" + LokiAuthForwardUserToken = "Forward" ) // `LokiManualParams` defines the full connection parameters to Loki. @@ -565,7 +573,7 @@ type LokiManualParams struct { // - `Forward` forwards the user token for authorization.
// - `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
// When using the Loki Operator, this must be set to `Forward`. - AuthToken LokiAuthToken `json:"authToken,omitempty"` + AuthToken string `json:"authToken,omitempty"` // TLS client configuration for Loki URL. // +optional @@ -914,14 +922,14 @@ type ExporterType string const ( KafkaExporter ExporterType = "Kafka" - IpfixExporter ExporterType = "IPFIX" + IpfixExporter ExporterType = "Ipfix" ) // `FlowCollectorExporter` defines an additional exporter to send enriched flows to. type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `Kafka` and `IPFIX`. + // `type` selects the type of exporters. The available options are `Kafka` and `Ipfix`. // +unionDiscriminator - // +kubebuilder:validation:Enum:="Kafka";"IPFIX" + // +kubebuilder:validation:Enum:="Kafka";"Ipfix" // +kubebuilder:validation:Required Type ExporterType `json:"type"` @@ -929,9 +937,9 @@ type FlowCollectorExporter struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. + // Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. // +optional - IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` + IPFIX FlowCollectorIpfixReceiver `json:"ipfix,omitempty"` } // `FlowCollectorStatus` defines the observed state of FlowCollector @@ -949,7 +957,7 @@ type FlowCollectorStatus struct { // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster // +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type` -// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` +// +kubebuilder:printcolumn:name="Sampling (Ebpf)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[*].reason" // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 8aeef027d..216b6c298 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -198,8 +198,8 @@ func (in *FlowCollector) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { *out = *in - out.IPFIX = in.IPFIX - in.EBPF.DeepCopyInto(&out.EBPF) + out.Ipfix = in.Ipfix + in.Ebpf.DeepCopyInto(&out.Ebpf) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. @@ -253,7 +253,7 @@ func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { +func (in *FlowCollectorEbpf) DeepCopyInto(out *FlowCollectorEbpf) { *out = *in in.Resources.DeepCopyInto(&out.Resources) if in.Sampling != nil { @@ -279,12 +279,12 @@ func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. 
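// Sketch, not part of the patch: the renamed v1beta2 surface in one place. All
// constants below appear in the types file above; the sampling value and the
// receiver address are placeholders taken from the sample manifest further down.
func exampleAgentAndExporter() (v1beta2.FlowCollectorAgent, v1beta2.FlowCollectorExporter) {
	sampling := int32(50)
	agent := v1beta2.FlowCollectorAgent{
		Type: v1beta2.AgentEbpf, // "Ebpf"; v1beta2.AgentIpfix selects the deprecated reporter
		Ebpf: v1beta2.FlowCollectorEbpf{Sampling: &sampling},
	}
	exporter := v1beta2.FlowCollectorExporter{
		Type: v1beta2.IpfixExporter, // "Ipfix", previously "IPFIX"
		IPFIX: v1beta2.FlowCollectorIpfixReceiver{
			TargetHost: "ipfix-collector.ipfix.svc.cluster.local",
			TargetPort: 4739,
			Transport:  "TCP", // or "UDP"
		},
	}
	return agent, exporter
}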
-func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEbpf. +func (in *FlowCollectorEbpf) DeepCopy() *FlowCollectorEbpf { if in == nil { return nil } - out := new(FlowCollectorEBPF) + out := new(FlowCollectorEbpf) in.DeepCopyInto(out) return out } @@ -388,33 +388,33 @@ func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { +func (in *FlowCollectorIpfix) DeepCopyInto(out *FlowCollectorIpfix) { *out = *in out.ClusterNetworkOperator = in.ClusterNetworkOperator out.OVNKubernetes = in.OVNKubernetes } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. -func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIpfix. +func (in *FlowCollectorIpfix) DeepCopy() *FlowCollectorIpfix { if in == nil { return nil } - out := new(FlowCollectorIPFIX) + out := new(FlowCollectorIpfix) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { +func (in *FlowCollectorIpfixReceiver) DeepCopyInto(out *FlowCollectorIpfixReceiver) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. -func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIpfixReceiver. +func (in *FlowCollectorIpfixReceiver) DeepCopy() *FlowCollectorIpfixReceiver { if in == nil { return nil } - out := new(FlowCollectorIPFIXReceiver) + out := new(FlowCollectorIpfixReceiver) in.DeepCopyInto(out) return out } diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 4d2425541..5582e878a 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -5107,7 +5107,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) + name: Sampling (Ebpf) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -5147,7 +5147,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `eBPF`.' + flow reporter when `spec.agent.type` is set to `Ebpf`.' properties: cacheActiveTimeout: default: 5s @@ -5338,8 +5338,8 @@ spec: type: object ipfix: description: '`ipfix` [deprecated (*)] - describes the settings - related to the IPFIX-based flow reporter when `spec.agent.type` - is set to `IPFIX`.' + related to the Ipfix-based flow reporter when `spec.agent.type` + is set to `Ipfix`.' properties: cacheActiveTimeout: default: 20s @@ -5368,8 +5368,8 @@ spec: forceSampleAll: default: false description: '`forceSampleAll` allows disabling sampling in - the IPFIX-based flow reporter. It is not recommended to - sample all the traffic with IPFIX, as it might generate + the Ipfix-based flow reporter. 
It is not recommended to + sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' @@ -5377,7 +5377,7 @@ spec: ovnKubernetes: description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is - used when using OVN''s IPFIX exports, without OpenShift. + used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: @@ -5403,23 +5403,23 @@ spec: it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you - can use the eBPF Agent instead of IPFIX.' + can use the eBPF Agent instead of Ipfix.' format: int32 minimum: 2 type: integer type: object type: - default: eBPF + default: Ebpf description: '`type` selects the flows tracing agent. Possible - values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `eBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` + values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
+ - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
+ `Ebpf` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting IPFIX, but they would require manual configuration).' + support exporting Ipfix, but they would require manual configuration).' enum: - - eBPF - - IPFIX + - Ebpf + - Ipfix type: string type: object consolePlugin: @@ -6146,19 +6146,19 @@ spec: to send enriched flows to.' properties: ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. + description: Ipfix configuration, such as the IP address and + port to send enriched Ipfix flows to. properties: targetHost: default: "" - description: Address of the IPFIX external receiver + description: Address of the Ipfix external receiver type: string targetPort: - description: Port for the IPFIX external receiver + description: Port for the Ipfix external receiver type: integer transport: description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. + for the Ipfix connection, defaults to `TCP`. enum: - TCP - UDP @@ -6345,10 +6345,10 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `Kafka` and `IPFIX`.' + options are `Kafka` and `Ipfix`.' enum: - Kafka - - IPFIX + - Ipfix type: string required: - type diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 43fb8d7f6..f953fcc63 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -378,7 +378,7 @@ metadata: }, "sampling": 50 }, - "type": "eBPF" + "type": "Ebpf" }, "consolePlugin": { "autoscaler": { @@ -526,7 +526,7 @@ metadata: capabilities: Seamless Upgrades categories: Monitoring, Networking console.openshift.io/plugins: '["netobserv-plugin"]' - containerImage: quay.io/netobserv/network-observability-operator:1.0.4 + containerImage: quay.io/amoghrd/network-observability-operator:main createdAt: ':created-at:' description: Network flows collector and monitoring solution operatorframework.io/suggested-namespace: openshift-netobserv-operator @@ -598,7 +598,7 @@ spec: ## Configuration - The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). + The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -616,7 +616,7 @@ spec: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. - - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. 
They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). + - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -886,7 +886,7 @@ spec: - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS - image: quay.io/netobserv/network-observability-operator:1.0.4 + image: quay.io/amoghrd/network-observability-operator:main imagePullPolicy: Always livenessProbe: httpGet: diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 3c3224412..45b0814fe 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -5093,7 +5093,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) + name: Sampling (Ebpf) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -5133,7 +5133,7 @@ spec: properties: ebpf: description: '`ebpf` describes the settings related to the eBPF-based - flow reporter when `spec.agent.type` is set to `eBPF`.' + flow reporter when `spec.agent.type` is set to `Ebpf`.' properties: cacheActiveTimeout: default: 5s @@ -5324,8 +5324,8 @@ spec: type: object ipfix: description: '`ipfix` [deprecated (*)] - describes the settings - related to the IPFIX-based flow reporter when `spec.agent.type` - is set to `IPFIX`.' + related to the Ipfix-based flow reporter when `spec.agent.type` + is set to `Ipfix`.' properties: cacheActiveTimeout: default: 20s @@ -5354,8 +5354,8 @@ spec: forceSampleAll: default: false description: '`forceSampleAll` allows disabling sampling in - the IPFIX-based flow reporter. It is not recommended to - sample all the traffic with IPFIX, as it might generate + the Ipfix-based flow reporter. It is not recommended to + sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' @@ -5363,7 +5363,7 @@ spec: ovnKubernetes: description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is - used when using OVN''s IPFIX exports, without OpenShift. + used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' 
properties: @@ -5389,23 +5389,23 @@ spec: it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you - can use the eBPF Agent instead of IPFIX.' + can use the eBPF Agent instead of Ipfix.' format: int32 minimum: 2 type: integer type: object type: - default: eBPF + default: Ebpf description: '`type` selects the flows tracing agent. Possible - values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
- `eBPF` is recommended as it offers better performances and should - work regardless of the CNI installed on the cluster. `IPFIX` + values are:
- `Ebpf` (default) to use NetObserv eBPF agent.
+ - `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
+ `Ebpf` is recommended as it offers better performances and should + work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they - support exporting IPFIX, but they would require manual configuration).' + support exporting Ipfix, but they would require manual configuration).' enum: - - eBPF - - IPFIX + - Ebpf + - Ipfix type: string type: object consolePlugin: @@ -6132,19 +6132,19 @@ spec: to send enriched flows to.' properties: ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. + description: Ipfix configuration, such as the IP address and + port to send enriched Ipfix flows to. properties: targetHost: default: "" - description: Address of the IPFIX external receiver + description: Address of the Ipfix external receiver type: string targetPort: - description: Port for the IPFIX external receiver + description: Port for the Ipfix external receiver type: integer transport: description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. + for the Ipfix connection, defaults to `TCP`. enum: - TCP - UDP @@ -6331,10 +6331,10 @@ spec: type: object type: description: '`type` selects the type of exporters. The available - options are `Kafka` and `IPFIX`.' + options are `Kafka` and `Ipfix`.' enum: - Kafka - - IPFIX + - Ipfix type: string required: - type diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md index 6e2e48cbc..e0e86d923 100644 --- a/config/descriptions/ocp.md +++ b/config/descriptions/ocp.md @@ -28,7 +28,7 @@ oc apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents/252b ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -44,7 +44,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. 
They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md index 04c610fcb..30e670499 100644 --- a/config/descriptions/upstream.md +++ b/config/descriptions/upstream.md @@ -32,7 +32,7 @@ kubectl apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -50,7 +50,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). 
Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index f69dc045d..fb1512921 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -14,7 +14,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/netobserv/network-observability-operator - newTag: 1.0.4 + newName: quay.io/amoghrd/network-observability-operator + newTag: main commonLabels: app: netobserv-operator diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml index ff3d2dfeb..9dc280d35 100644 --- a/config/samples/flows_v1beta2_flowcollector.yaml +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -6,7 +6,7 @@ spec: namespace: netobserv deploymentModel: Direct agent: - type: eBPF + type: Ebpf ebpf: imagePullPolicy: IfNotPresent sampling: 50 @@ -137,7 +137,7 @@ spec: # address: "kafka-cluster-kafka-bootstrap.netobserv" # topic: netobserv-flows-export # or - # - type: IPFIX + # - type: Ipfix # ipfix: # targetHost: "ipfix-collector.ipfix.svc.cluster.local" # targetPort: 4739 diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index a0e2543d1..8245c9724 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -344,16 +344,16 @@ func (b *builder) setLokiConfig(lconf *config.LokiConfig) { } func (b *builder) setFrontendConfig(fconf *config.FrontendConfig) { - if helper.UseEBPF(b.desired) { - if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { + if helper.UseEbpf(b.desired) { + if helper.IsPktDropEnabled(&b.desired.Agent.Ebpf) { fconf.Features = append(fconf.Features, "pktDrop") } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { + if helper.IsDNSTrackingEnabled(&b.desired.Agent.Ebpf) { fconf.Features = append(fconf.Features, "dnsTracking") } - if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { + if helper.IsFlowRTTEnabled(&b.desired.Agent.Ebpf) { fconf.Features = append(fconf.Features, "flowRTT") } } diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 2fbb72e14..5fcb357e7 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -105,7 +105,7 @@ func (c *AgentController) Reconcile( if err != nil { return fmt.Errorf("fetching current EBPF Agent: %w", err) } - if !helper.UseEBPF(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { + if !helper.UseEbpf(&target.Spec) || c.PreviousPrivilegedNamespace() != c.PrivilegedNamespace() { if current == nil { rlog.Info("nothing to do, as the requested agent is not eBPF", "currentAgent", target.Spec.Agent) @@ -125,7 +125,7 @@ func (c *AgentController) Reconcile( current = nil } - if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.EBPF); err != nil { + if err := c.permissions.Reconcile(ctx, &target.Spec.Agent.Ebpf); err != nil { return fmt.Errorf("reconciling permissions: %w", err) } desired, err := c.desired(ctx, target, rlog) @@ -175,7 +175,7 @@ func newMountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropaga } func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCollector, rlog logr.Logger) (*v1.DaemonSet, error) 
{ - if coll == nil || !helper.UseEBPF(&coll.Spec) { + if coll == nil || !helper.UseEbpf(&coll.Spec) { return nil, nil } version := helper.ExtractVersion(c.config.EBPFAgentImage) @@ -187,7 +187,7 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol volumeMounts := c.volumes.GetMounts() volumes := c.volumes.GetVolumes() - if helper.IsPrivileged(&coll.Spec.Agent.EBPF) { + if helper.IsPrivileged(&coll.Spec.Agent.Ebpf) { volume := corev1.Volume{ Name: bpfNetNSMountName, VolumeSource: corev1.VolumeSource{ @@ -206,8 +206,8 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol volumeMounts = append(volumeMounts, volumeMount) } - if helper.IsFeatureEnabled(&coll.Spec.Agent.EBPF, flowslatest.PacketDrop) { - if !coll.Spec.Agent.EBPF.Privileged { + if helper.IsFeatureEnabled(&coll.Spec.Agent.Ebpf, flowslatest.PacketDrop) { + if !coll.Spec.Agent.Ebpf.Privileged { rlog.Error(fmt.Errorf("invalid configuration"), "To use PacketsDrop feature privileged mode needs to be enabled") } else { volume := corev1.Volume{ @@ -257,8 +257,8 @@ func (c *AgentController) desired(ctx context.Context, coll *flowslatest.FlowCol Containers: []corev1.Container{{ Name: constants.EBPFAgentName, Image: c.config.EBPFAgentImage, - ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.EBPF.ImagePullPolicy), - Resources: coll.Spec.Agent.EBPF.Resources, + ImagePullPolicy: corev1.PullPolicy(coll.Spec.Agent.Ebpf.ImagePullPolicy), + Resources: coll.Spec.Agent.Ebpf.Resources, SecurityContext: c.securityContext(coll), Env: env, VolumeMounts: volumeMounts, @@ -277,9 +277,9 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC corev1.EnvVar{Name: envExport, Value: exportKafka}, corev1.EnvVar{Name: envKafkaBrokers, Value: coll.Spec.Kafka.Address}, corev1.EnvVar{Name: envKafkaTopic, Value: coll.Spec.Kafka.Topic}, - corev1.EnvVar{Name: envKafkaBatchSize, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize)}, + corev1.EnvVar{Name: envKafkaBatchSize, Value: strconv.Itoa(coll.Spec.Agent.Ebpf.KafkaBatchSize)}, // For easier user configuration, we can assume a constant message size per flow (~100B in protobuf) - corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.EBPF.KafkaBatchSize / averageMessageSize)}, + corev1.EnvVar{Name: envKafkaBatchMessages, Value: strconv.Itoa(coll.Spec.Agent.Ebpf.KafkaBatchSize / averageMessageSize)}, ) if coll.Spec.Kafka.TLS.Enable { // Annotate pod with certificate reference so that it is reloaded if modified @@ -374,8 +374,8 @@ func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *core RunAsUser: ptr.To(int64(0)), } - if coll.Spec.Agent.EBPF.Privileged { - sc.Privileged = &coll.Spec.Agent.EBPF.Privileged + if coll.Spec.Agent.Ebpf.Privileged { + sc.Privileged = &coll.Spec.Agent.Ebpf.Privileged } else { sc.Capabilities = &corev1.Capabilities{Add: permissions.AllowedCapabilities} } @@ -386,42 +386,42 @@ func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *core func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1.EnvVar { var config []corev1.EnvVar - if coll.Spec.Agent.EBPF.CacheActiveTimeout != "" { + if coll.Spec.Agent.Ebpf.CacheActiveTimeout != "" { config = append(config, corev1.EnvVar{ Name: envCacheActiveTimeout, - Value: coll.Spec.Agent.EBPF.CacheActiveTimeout, + Value: coll.Spec.Agent.Ebpf.CacheActiveTimeout, }) } - if coll.Spec.Agent.EBPF.CacheMaxFlows != 0 { + if coll.Spec.Agent.Ebpf.CacheMaxFlows != 0 { config = 
append(config, corev1.EnvVar{ Name: envCacheMaxFlows, - Value: strconv.Itoa(int(coll.Spec.Agent.EBPF.CacheMaxFlows)), + Value: strconv.Itoa(int(coll.Spec.Agent.Ebpf.CacheMaxFlows)), }) } - if coll.Spec.Agent.EBPF.LogLevel != "" { + if coll.Spec.Agent.Ebpf.LogLevel != "" { config = append(config, corev1.EnvVar{ Name: envLogLevel, - Value: coll.Spec.Agent.EBPF.LogLevel, + Value: coll.Spec.Agent.Ebpf.LogLevel, }) } - if len(coll.Spec.Agent.EBPF.Interfaces) > 0 { + if len(coll.Spec.Agent.Ebpf.Interfaces) > 0 { config = append(config, corev1.EnvVar{ Name: envInterfaces, - Value: strings.Join(coll.Spec.Agent.EBPF.Interfaces, envListSeparator), + Value: strings.Join(coll.Spec.Agent.Ebpf.Interfaces, envListSeparator), }) } - if len(coll.Spec.Agent.EBPF.ExcludeInterfaces) > 0 { + if len(coll.Spec.Agent.Ebpf.ExcludeInterfaces) > 0 { config = append(config, corev1.EnvVar{ Name: envExcludeInterfaces, - Value: strings.Join(coll.Spec.Agent.EBPF.ExcludeInterfaces, envListSeparator), + Value: strings.Join(coll.Spec.Agent.Ebpf.ExcludeInterfaces, envListSeparator), }) } - sampling := coll.Spec.Agent.EBPF.Sampling + sampling := coll.Spec.Agent.Ebpf.Sampling if sampling != nil && *sampling > 1 { config = append(config, corev1.EnvVar{ Name: envSampling, @@ -429,7 +429,7 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 }) } - if helper.IsFlowRTTEnabled(&coll.Spec.Agent.EBPF) { + if helper.IsFlowRTTEnabled(&coll.Spec.Agent.Ebpf) { config = append(config, corev1.EnvVar{ Name: envEnableFlowRTT, Value: "true", @@ -438,8 +438,8 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 // set GOMEMLIMIT which allows specifying a soft memory cap to force GC when resource limit is reached // to prevent OOM - if coll.Spec.Agent.EBPF.Resources.Limits.Memory() != nil { - if memLimit, ok := coll.Spec.Agent.EBPF.Resources.Limits.Memory().AsInt64(); ok { + if coll.Spec.Agent.Ebpf.Resources.Limits.Memory() != nil { + if memLimit, ok := coll.Spec.Agent.Ebpf.Resources.Limits.Memory().AsInt64(); ok { // we will set the GOMEMLIMIT to current memlimit - 10% as a headroom to account for // memory sources the Go runtime is unaware of memLimit -= int64(float64(memLimit) * 0.1) @@ -447,14 +447,14 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 } } - if helper.IsPktDropEnabled(&coll.Spec.Agent.EBPF) { + if helper.IsPktDropEnabled(&coll.Spec.Agent.Ebpf) { config = append(config, corev1.EnvVar{ Name: envEnablePktDrop, Value: "true", }) } - if helper.IsDNSTrackingEnabled(&coll.Spec.Agent.EBPF) { + if helper.IsDNSTrackingEnabled(&coll.Spec.Agent.Ebpf) { config = append(config, corev1.EnvVar{ Name: envEnableDNSTracking, Value: "true", @@ -465,7 +465,7 @@ func (c *AgentController) setEnvConfig(coll *flowslatest.FlowCollector) []corev1 dedupJustMark := dedupeJustMarkDefault // we need to sort env map to keep idempotency, // as equal maps could be iterated in different order - for _, pair := range helper.KeySorted(coll.Spec.Agent.EBPF.Debug.Env) { + for _, pair := range helper.KeySorted(coll.Spec.Agent.Ebpf.Debug.Env) { k, v := pair[0], pair[1] if k == envDedupe { dedup = v diff --git a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index 4b41a8c31..76073be6a 100644 --- a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -33,7 +33,7 @@ func NewReconciler(cmn *reconcilers.Common) Reconciler { return Reconciler{Common: *cmn} } 
-func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEBPF) error { +func (c *Reconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowCollectorEbpf) error { log.IntoContext(ctx, log.FromContext(ctx).WithName("permissions")) if err := c.reconcileNamespace(ctx); err != nil { @@ -121,7 +121,7 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { } func (c *Reconciler) reconcileVendorPermissions( - ctx context.Context, desired *flowslatest.FlowCollectorEBPF, + ctx context.Context, desired *flowslatest.FlowCollectorEbpf, ) error { if c.UseOpenShiftSCC { return c.reconcileOpenshiftPermissions(ctx, desired) @@ -130,7 +130,7 @@ func (c *Reconciler) reconcileVendorPermissions( } func (c *Reconciler) reconcileOpenshiftPermissions( - ctx context.Context, desired *flowslatest.FlowCollectorEBPF, + ctx context.Context, desired *flowslatest.FlowCollectorEbpf, ) error { rlog := log.FromContext(ctx, "securityContextConstraints", constants.EBPFSecurityContext) diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index 1239068b3..cac23d223 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -148,12 +148,12 @@ func (r *FlowCollectorReconciler) Reconcile(ctx context.Context, _ ctrl.Request) // OVS config map for CNO if r.availableAPIs.HasCNO() { - ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.IPFIX.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) + ovsConfigController := ovs.NewFlowsConfigCNOController(&reconcilersInfo, desired.Spec.Agent.Ipfix.ClusterNetworkOperator.Namespace, ovsFlowsConfigMapName) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileCNOFailed(err), desired) } } else { - ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.IPFIX.OVNKubernetes) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&reconcilersInfo, desired.Spec.Agent.Ipfix.OVNKubernetes) if err := ovsConfigController.Reconcile(ctx, desired); err != nil { return ctrl.Result{}, r.failure(ctx, conditions.ReconcileOVNKFailed(err), desired) } @@ -380,7 +380,7 @@ func (r *FlowCollectorReconciler) finalize(ctx context.Context, desired *flowsla if !r.availableAPIs.HasCNO() { ns := getNamespaceName(desired) info := r.newCommonInfo(ctx, desired, ns, ns, nil, func(b bool) {}, func(b bool) {}) - ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.IPFIX.OVNKubernetes) + ovsConfigController := ovs.NewFlowsConfigOVNKController(&info, desired.Spec.Agent.Ipfix.OVNKubernetes) if err := ovsConfigController.Finalize(ctx, desired); err != nil { return fmt.Errorf("failed to finalize ovn-kubernetes reconciler: %w", err) } diff --git a/controllers/flowcollector_controller_ebpf_test.go b/controllers/flowcollector_controller_ebpf_test.go index ff948686d..fe9a2d32a 100644 --- a/controllers/flowcollector_controller_ebpf_test.go +++ b/controllers/flowcollector_controller_ebpf_test.go @@ -61,8 +61,8 @@ func flowCollectorEBPFSpecs() { LogLevel: "error", }, Agent: flowslatest.FlowCollectorAgent{ - Type: "eBPF", - EBPF: flowslatest.FlowCollectorEBPF{ + Type: "EBPF", + Ebpf: flowslatest.FlowCollectorEbpf{ Sampling: ptr.To(int32(123)), CacheActiveTimeout: "15s", CacheMaxFlows: 100, @@ -148,9 +148,9 @@ func flowCollectorEBPFSpecs() { It("Should update fields that have changed", func() { UpdateCR(crKey, 
func(fc *flowslatest.FlowCollector) { - Expect(*fc.Spec.Agent.EBPF.Sampling).To(Equal(int32(123))) - *fc.Spec.Agent.EBPF.Sampling = 4 - fc.Spec.Agent.EBPF.Privileged = true + Expect(*fc.Spec.Agent.Ebpf.Sampling).To(Equal(int32(123))) + *fc.Spec.Agent.Ebpf.Sampling = 4 + fc.Spec.Agent.Ebpf.Privileged = true }) ds := appsv1.DaemonSet{} @@ -281,7 +281,7 @@ func flowCollectorEBPFKafkaSpecs() { ObjectMeta: metav1.ObjectMeta{Name: crKey.Name}, Spec: flowslatest.FlowCollectorSpec{ Namespace: operatorNamespace, - Agent: flowslatest.FlowCollectorAgent{Type: "eBPF"}, + Agent: flowslatest.FlowCollectorAgent{Type: "Ebpf"}, DeploymentModel: flowslatest.DeploymentModelKafka, Kafka: flowslatest.FlowCollectorKafka{ Address: "kafka-cluster-kafka-bootstrap", diff --git a/controllers/flowcollector_controller_iso_test.go b/controllers/flowcollector_controller_iso_test.go index f1bb35053..1760063d9 100644 --- a/controllers/flowcollector_controller_iso_test.go +++ b/controllers/flowcollector_controller_iso_test.go @@ -87,8 +87,8 @@ func flowCollectorIsoSpecs() { DropUnusedFields: ptr.To(false), }, Agent: flowslatest.FlowCollectorAgent{ - Type: "eBPF", - IPFIX: flowslatest.FlowCollectorIPFIX{ + Type: "Ebpf", + Ipfix: flowslatest.FlowCollectorIpfix{ Sampling: 2, // 0 is forbidden here CacheActiveTimeout: "5s", CacheMaxFlows: 100, @@ -102,7 +102,7 @@ func flowCollectorIsoSpecs() { ContainerName: "test", }, }, - EBPF: flowslatest.FlowCollectorEBPF{ + Ebpf: flowslatest.FlowCollectorEbpf{ Sampling: &zero, CacheActiveTimeout: "5s", CacheMaxFlows: 100, diff --git a/controllers/flowcollector_controller_test.go b/controllers/flowcollector_controller_test.go index 6a10a58ee..028fa83bd 100644 --- a/controllers/flowcollector_controller_test.go +++ b/controllers/flowcollector_controller_test.go @@ -122,8 +122,8 @@ func flowCollectorControllerSpecs() { }, }, Agent: flowslatest.FlowCollectorAgent{ - Type: "IPFIX", - IPFIX: flowslatest.FlowCollectorIPFIX{ + Type: "Ipfix", + Ipfix: flowslatest.FlowCollectorIpfix{ Sampling: 200, }, }, @@ -283,7 +283,7 @@ func flowCollectorControllerSpecs() { }, } fc.Spec.Loki = flowslatest.FlowCollectorLoki{} - fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ + fc.Spec.Agent.Ipfix = flowslatest.FlowCollectorIpfix{ Sampling: 400, CacheActiveTimeout: "30s", CacheMaxFlows: 1000, @@ -399,7 +399,7 @@ func flowCollectorControllerSpecs() { if err := k8sClient.Get(ctx, crKey, &fc); err != nil { return err } - fc.Spec.Agent.IPFIX.Sampling = 1 + fc.Spec.Agent.Ipfix.Sampling = 1 return k8sClient.Update(ctx, &fc) }).Should(Satisfy(func(err error) bool { return err != nil && strings.Contains(err.Error(), "spec.agent.ipfix.sampling: Invalid value: 1") @@ -410,8 +410,8 @@ func flowCollectorControllerSpecs() { if err := k8sClient.Get(ctx, crKey, &fc); err != nil { return err } - fc.Spec.Agent.IPFIX.Sampling = 10 - fc.Spec.Agent.IPFIX.ForceSampleAll = true + fc.Spec.Agent.Ipfix.Sampling = 10 + fc.Spec.Agent.Ipfix.ForceSampleAll = true return k8sClient.Update(ctx, &fc) }).Should(Succeed()) @@ -888,7 +888,7 @@ func flowCollectorControllerSpecs() { UpdateCR(crKey, func(fc *flowslatest.FlowCollector) { fc.Spec.Processor.Port = 9999 fc.Spec.Namespace = otherNamespace - fc.Spec.Agent.IPFIX = flowslatest.FlowCollectorIPFIX{ + fc.Spec.Agent.Ipfix = flowslatest.FlowCollectorIpfix{ Sampling: 200, } }) diff --git a/controllers/flowlogspipeline/flp_common_objects.go b/controllers/flowlogspipeline/flp_common_objects.go index dace20543..dad710e69 100644 --- a/controllers/flowlogspipeline/flp_common_objects.go +++ 
b/controllers/flowlogspipeline/flp_common_objects.go @@ -119,7 +119,7 @@ func (b *builder) serviceMonitorName() string { return serviceMonitorName(b.conf func (b *builder) prometheusRuleName() string { return prometheusRuleName(b.confKind) } func (b *builder) portProtocol() corev1.Protocol { - if helper.UseEBPF(b.desired) { + if helper.UseEbpf(b.desired) { return corev1.ProtocolTCP } return corev1.ProtocolUDP @@ -395,7 +395,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P }, } - if helper.IsPktDropEnabled(&b.desired.Agent.EBPF) { + if helper.IsPktDropEnabled(&b.desired.Agent.Ebpf) { outputPktDropFields := []api.OutputField{ { Name: "PktDropBytes", @@ -427,7 +427,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P outputFields = append(outputFields, outputPktDropFields...) } - if helper.IsDNSTrackingEnabled(&b.desired.Agent.EBPF) { + if helper.IsDNSTrackingEnabled(&b.desired.Agent.Ebpf) { outDNSTrackingFields := []api.OutputField{ { Name: "DnsFlagsResponseCode", @@ -441,7 +441,7 @@ func (b *builder) addConnectionTracking(indexFields []string, lastStage config.P outputFields = append(outputFields, outDNSTrackingFields...) } - if helper.IsFlowRTTEnabled(&b.desired.Agent.EBPF) { + if helper.IsFlowRTTEnabled(&b.desired.Agent.Ebpf) { outputFields = append(outputFields, api.OutputField{ Name: "MaxTimeFlowRttNs", Operation: "max", @@ -526,7 +526,7 @@ func (b *builder) addTransformFilter(lastStage config.PipelineBuilderStage) conf // Filter-out unused fields? if helper.PtrBool(b.desired.Processor.DropUnusedFields) { - if helper.UseIPFIX(b.desired) { + if helper.UseIpfix(b.desired) { rules := filters.GetOVSGoflowUnusedRules() transformFilterRules = append(transformFilterRules, rules...) } @@ -546,7 +546,7 @@ func (b *builder) addCustomExportStages(enrichedStage *config.PipelineBuilderSta b.createKafkaWriteStage(fmt.Sprintf("kafka-export-%d", i), &exporter.Kafka, enrichedStage) } if exporter.Type == flowslatest.IpfixExporter { - createIPFIXWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) + createIpfixWriteStage(fmt.Sprintf("IPFIX-export-%d", i), &exporter.IPFIX, enrichedStage) } } } @@ -560,11 +560,11 @@ func (b *builder) createKafkaWriteStage(name string, spec *flowslatest.FlowColle }) } -func createIPFIXWriteStage(name string, spec *flowslatest.FlowCollectorIPFIXReceiver, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { +func createIpfixWriteStage(name string, spec *flowslatest.FlowCollectorIpfixReceiver, fromStage *config.PipelineBuilderStage) config.PipelineBuilderStage { return fromStage.WriteIpfix(name, api.WriteIpfix{ TargetHost: spec.TargetHost, TargetPort: spec.TargetPort, - Transport: getIPFIXTransport(spec.Transport), + Transport: getIpfixTransport(spec.Transport), EnterpriseID: 2, }) } @@ -599,7 +599,7 @@ func (b *builder) getKafkaSASL(sasl *flowslatest.SASLConfig, volumePrefix string } } -func getIPFIXTransport(transport string) string { +func getIpfixTransport(transport string) string { switch transport { case "UDP": return "udp" diff --git a/controllers/flowlogspipeline/flp_ingest_objects.go b/controllers/flowlogspipeline/flp_ingest_objects.go index a8c3cec8f..dc516caa8 100644 --- a/controllers/flowlogspipeline/flp_ingest_objects.go +++ b/controllers/flowlogspipeline/flp_ingest_objects.go @@ -51,7 +51,7 @@ func (b *ingestBuilder) configMap() (*corev1.ConfigMap, string, error) { func (b *ingestBuilder) buildPipelineConfig() ([]config.Stage, []config.StageParam, error) 
{ var pipeline config.PipelineBuilderStage - if helper.UseIPFIX(b.generic.desired) { + if helper.UseIpfix(b.generic.desired) { // IPFIX collector pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{ Port: int(b.generic.desired.Processor.Port), diff --git a/controllers/flowlogspipeline/flp_ingest_reconciler.go b/controllers/flowlogspipeline/flp_ingest_reconciler.go index 195dd465b..f451b2ffd 100644 --- a/controllers/flowlogspipeline/flp_ingest_reconciler.go +++ b/controllers/flowlogspipeline/flp_ingest_reconciler.go @@ -79,7 +79,7 @@ func (r *flpIngesterReconciler) reconcile(ctx context.Context, desired *flowslat } // Ingester only used with Kafka and without eBPF - if !helper.UseKafka(&desired.Spec) || helper.UseEBPF(&desired.Spec) { + if !helper.UseKafka(&desired.Spec) || helper.UseEbpf(&desired.Spec) { r.Managed.TryDeleteAll(ctx) return nil } diff --git a/controllers/flowlogspipeline/flp_monolith_objects.go b/controllers/flowlogspipeline/flp_monolith_objects.go index e846d1e07..c037bfc4d 100644 --- a/controllers/flowlogspipeline/flp_monolith_objects.go +++ b/controllers/flowlogspipeline/flp_monolith_objects.go @@ -52,7 +52,7 @@ func (b *monolithBuilder) configMap() (*corev1.ConfigMap, string, error) { func (b *monolithBuilder) buildPipelineConfig() ([]config.Stage, []config.StageParam, error) { var pipeline config.PipelineBuilderStage - if helper.UseIPFIX(b.generic.desired) { + if helper.UseIpfix(b.generic.desired) { // IPFIX collector pipeline = config.NewCollectorPipeline("ipfix", api.IngestCollector{ Port: int(b.generic.desired.Processor.Port), diff --git a/controllers/flowlogspipeline/flp_test.go b/controllers/flowlogspipeline/flp_test.go index 8d61e552d..4198c6a46 100644 --- a/controllers/flowlogspipeline/flp_test.go +++ b/controllers/flowlogspipeline/flp_test.go @@ -59,7 +59,7 @@ func getConfig(lokiMode ...string) flowslatest.FlowCollectorSpec { return flowslatest.FlowCollectorSpec{ DeploymentModel: flowslatest.DeploymentModelDirect, - Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIPFIX}, + Agent: flowslatest.FlowCollectorAgent{Type: flowslatest.AgentIpfix}, Processor: flowslatest.FlowCollectorFLP{ Port: 2055, ImagePullPolicy: string(pullPolicy), @@ -970,7 +970,7 @@ func TestPipelineWithExporter(t *testing.T) { cfg.Exporters = append(cfg.Exporters, &flowslatest.FlowCollectorExporter{ Type: flowslatest.IpfixExporter, - IPFIX: flowslatest.FlowCollectorIPFIXReceiver{ + IPFIX: flowslatest.FlowCollectorIpfixReceiver{ TargetHost: "ipfix-receiver-test", TargetPort: 9999, Transport: "TCP", diff --git a/controllers/flowlogspipeline/flp_transfo_objects.go b/controllers/flowlogspipeline/flp_transfo_objects.go index 7cab884c6..603f052aa 100644 --- a/controllers/flowlogspipeline/flp_transfo_objects.go +++ b/controllers/flowlogspipeline/flp_transfo_objects.go @@ -57,7 +57,7 @@ func (b *transfoBuilder) buildPipelineConfig() ([]config.Stage, []config.StagePa // For now, we leave this communication via JSON and just setup protobuf ingestion when // the transformer is communicating directly via eBPF agent decoder := api.Decoder{Type: "protobuf"} - if helper.UseIPFIX(b.generic.desired) { + if helper.UseIpfix(b.generic.desired) { decoder = api.Decoder{Type: "json"} } pipeline := config.NewKafkaPipeline("kafka-read", api.IngestKafka{ diff --git a/controllers/ovs/flowsconfig_cno_reconciler.go b/controllers/ovs/flowsconfig_cno_reconciler.go index 1916372d9..c8843dd49 100644 --- a/controllers/ovs/flowsconfig_cno_reconciler.go +++ 
b/controllers/ovs/flowsconfig_cno_reconciler.go @@ -38,7 +38,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl if err != nil { return err } - if !helper.UseIPFIX(&target.Spec) { + if !helper.UseIpfix(&target.Spec) { if current == nil { return nil } @@ -58,7 +58,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl // compare current and desired if current == nil { - rlog.Info("Provided IPFIX configuration. Creating " + c.ovsConfigMapName + " ConfigMap") + rlog.Info("Provided Ipfix configuration. Creating " + c.ovsConfigMapName + " ConfigMap") cm, err := c.flowsConfigMap(desired) if err != nil { return err @@ -67,7 +67,7 @@ func (c *FlowsConfigCNOController) Reconcile(ctx context.Context, target *flowsl } if desired != nil && *desired != *current { - rlog.Info("Provided IPFIX configuration differs current configuration. Updating") + rlog.Info("Provided Ipfix configuration differs current configuration. Updating") cm, err := c.flowsConfigMap(desired) if err != nil { return err @@ -101,11 +101,11 @@ func (c *FlowsConfigCNOController) current(ctx context.Context) (*flowsConfig, e func (c *FlowsConfigCNOController) desired( ctx context.Context, coll *flowslatest.FlowCollector) *flowsConfig { - corrected := coll.Spec.Agent.IPFIX.DeepCopy() + corrected := coll.Spec.Agent.Ipfix.DeepCopy() corrected.Sampling = getSampling(ctx, corrected) return &flowsConfig{ - FlowCollectorIPFIX: *corrected, + FlowCollectorIpfix: *corrected, NodePort: coll.Spec.Processor.Port, } } diff --git a/controllers/ovs/flowsconfig_ovnk_reconciler.go b/controllers/ovs/flowsconfig_ovnk_reconciler.go index 926aed71c..ed4ffb42d 100644 --- a/controllers/ovs/flowsconfig_ovnk_reconciler.go +++ b/controllers/ovs/flowsconfig_ovnk_reconciler.go @@ -51,15 +51,15 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows Name: c.config.DaemonSetName, Namespace: c.config.Namespace, }, ds); err != nil { - if kerr.IsNotFound(err) && !helper.UseIPFIX(&target.Spec) { - // If we don't want IPFIX and ovn-k daemonset is not found, assume there no ovn-k, just succeed + if kerr.IsNotFound(err) && !helper.UseIpfix(&target.Spec) { + // If we don't want Ipfix and ovn-k daemonset is not found, assume there no ovn-k, just succeed rlog.Info("Skip reconciling OVN: OVN DaemonSet not found") return nil } return fmt.Errorf("retrieving %s/%s daemonset: %w", c.config.Namespace, c.config.DaemonSetName, err) } - ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.IPFIX.OVNKubernetes.ContainerName) + ovnkubeNode := helper.FindContainer(&ds.Spec.Template.Spec, target.Spec.Agent.Ipfix.OVNKubernetes.ContainerName) if ovnkubeNode == nil { return errors.New("could not find container ovnkube-node") } @@ -71,7 +71,7 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } } if anyUpdate { - rlog.Info("Provided IPFIX configuration differs current configuration. Updating") + rlog.Info("Provided Ipfix configuration differs current configuration. 
Updating") return c.Update(ctx, ds) } @@ -80,21 +80,21 @@ func (c *FlowsConfigOVNKController) updateEnv(ctx context.Context, target *flows } func (c *FlowsConfigOVNKController) desiredEnv(ctx context.Context, coll *flowslatest.FlowCollector) (map[string]string, error) { - cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.IPFIX.CacheActiveTimeout) + cacheTimeout, err := time.ParseDuration(coll.Spec.Agent.Ipfix.CacheActiveTimeout) if err != nil { return nil, err } - sampling := getSampling(ctx, &coll.Spec.Agent.IPFIX) + sampling := getSampling(ctx, &coll.Spec.Agent.Ipfix) envs := map[string]string{ "OVN_IPFIX_TARGETS": "", "OVN_IPFIX_CACHE_ACTIVE_TIMEOUT": strconv.Itoa(int(cacheTimeout.Seconds())), - "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.IPFIX.CacheMaxFlows)), + "OVN_IPFIX_CACHE_MAX_FLOWS": strconv.Itoa(int(coll.Spec.Agent.Ipfix.CacheMaxFlows)), "OVN_IPFIX_SAMPLING": strconv.Itoa(int(sampling)), } - if !helper.UseIPFIX(&coll.Spec) { - // No IPFIX => leave target empty and return + if !helper.UseIpfix(&coll.Spec) { + // No Ipfix => leave target empty and return return envs, nil } diff --git a/controllers/ovs/flowsconfig_types.go b/controllers/ovs/flowsconfig_types.go index b84a20957..846cbc5c6 100644 --- a/controllers/ovs/flowsconfig_types.go +++ b/controllers/ovs/flowsconfig_types.go @@ -12,7 +12,7 @@ import ( ) type flowsConfig struct { - flowslatest.FlowCollectorIPFIX `json:",inline" mapstructure:",squash"` + flowslatest.FlowCollectorIpfix `json:",inline" mapstructure:",squash"` SharedTarget string `json:"sharedTarget,omitempty" mapstructure:"sharedTarget,omitempty"` NodePort int32 `json:"nodePort,omitempty" mapstructure:"nodePort,omitempty"` } @@ -41,7 +41,7 @@ func (fc *flowsConfig) asStringMap() (map[string]string, error) { // getSampling returns the configured sampling, or 1 if ipfix.forceSampleAll is true // Note that configured sampling has a minimum value of 2. // See also https://bugzilla.redhat.com/show_bug.cgi?id=2103136 , https://bugzilla.redhat.com/show_bug.cgi?id=2104943 -func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIPFIX) int32 { +func getSampling(ctx context.Context, cfg *flowslatest.FlowCollectorIpfix) int32 { rlog := log.FromContext(ctx) if cfg.ForceSampleAll { rlog.Info("Warning, sampling is set to 1. This may put cluster stability at risk.") diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index a4ec7ede3..dc53ce079 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -9111,24 +9111,24 @@ Agent configuration for flows extraction.
@@ -9140,7 +9140,7 @@ Agent configuration for flows extraction. -`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`. +`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.
features []enum - List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.

+ List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packet drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.&#10;
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.&#10;
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature works better with `sampling` set to 1.&#10;

false
features []enum - List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.

+ List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packet drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.&#10;
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.&#10;
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature works better with `sampling` set to 1.&#10;

false
includeList []string - `includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.
+ `includeList` is a list of metric names to specify which ones to generate. The names correspond to the names in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Note that the more metrics you add, the bigger the impact on Prometheus workload resources. Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when the `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when the `FlowRTT` feature is enabled). More information, with the full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md&#10;
false
includeList []string - `includeList` is a list of metric names to specify which metrics to generate. The names correspond to the name in Prometheus, without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Available names are: `namespace_egress_bytes_total`, `namespace_egress_packets_total`, `namespace_ingress_bytes_total`, `namespace_ingress_packets_total`, `namespace_flows_total`, `node_egress_bytes_total`, `node_egress_packets_total`, `node_ingress_bytes_total`, `node_ingress_packets_total`, `node_flows_total`, `workload_egress_bytes_total`, `workload_egress_packets_total`, `workload_ingress_bytes_total`, `workload_ingress_packets_total`, `workload_flows_total`.
+ `includeList` is a list of metric names to specify which ones to generate. The names correspond to the names in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Note that the more metrics you add, the bigger the impact on Prometheus workload resources. Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when the `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when the `FlowRTT` feature is enabled). More information, with the full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md&#10;
false
ebpf object - `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`.
+ `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.
false
ipfix object - `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
+ `ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.
false
type enum - `type` selects the flows tracing agent. Possible values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ `type` selects the flows tracing agent. Possible values are:
- `Ebpf` (default) to use the NetObserv eBPF agent.&#10;
- `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
`Ebpf` is recommended as it offers better performance and should work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, but they would require manual configuration).&#10;

- Enum: eBPF, IPFIX
- Default: eBPF
+ Enum: Ebpf, Ipfix
+ Default: Ebpf
false
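For illustration, under the renamed casing documented in the rows above, a minimal v1beta2 agent configuration could look like the following sketch (the `sampling` value and the `features` selection are examples, not defaults):

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster
spec:
  agent:
    type: Ebpf            # formerly "eBPF"; the deprecated alternative is "Ipfix"
    ebpf:
      sampling: 50        # example rate: one flow out of 50 is sampled
      privileged: true    # required by the PacketDrop and DNSTracking features
      features:
        - PacketDrop      # needs privileged mode, as described above
        - DNSTracking
        - FlowRTT         # works better with sampling set to 1
```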
@@ -9363,7 +9363,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`. +`ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.
@@ -9405,7 +9405,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -9414,14 +9414,14 @@ ResourceClaim references one entry in PodSpec.ResourceClaims.
forceSampleAll boolean - `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.
+ `forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.

Default: false
ovnKubernetes object - `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+ `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when OVN's Ipfix exports are used without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.&#10;
false
sampling integer - `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.
+ `sampling` is the sampling rate on the reporter. 100 means one flow out of 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of Ipfix.&#10;

Format: int32
Default: 400
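Putting the `sampling` and `forceSampleAll` semantics above together, a hedged sketch of the deprecated Ipfix agent section might look like this (values shown are the documented defaults):

```yaml
spec:
  agent:
    type: Ipfix               # deprecated; shown for completeness
    ipfix:
      sampling: 400           # default; values below 2 are rejected
      forceSampleAll: false   # set to true (at your own risk) to sample everything;
                              # when true, `sampling` is ignored
```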
@@ -9466,7 +9466,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. +`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. @@ -10710,16 +10710,16 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -10738,7 +10738,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. -IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. +Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to.
type enum - `type` selects the type of exporters. The available options are `Kafka` and `IPFIX`.
+ `type` selects the type of exporters. The available options are `Kafka` and `Ipfix`.

- Enum: Kafka, IPFIX
+ Enum: Kafka, Ipfix
true
ipfix object - IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+ Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to.
false
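As a usage sketch, an exporter entry combining the renamed `Ipfix` type with the receiver fields above could look as follows (host and port values are taken from the sample file earlier in this patch):

```yaml
spec:
  exporters:
    - type: Ipfix    # `Kafka` is the other available exporter type
      ipfix:
        targetHost: "ipfix-collector.ipfix.svc.cluster.local"  # address of the external receiver
        targetPort: 4739
        transport: TCP   # `TCP` (default) or `UDP`
```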
@@ -10753,7 +10753,7 @@ IPFIX configuration, such as the IP address and port to send enriched IPFIX flow @@ -10762,14 +10762,14 @@ IPFIX configuration, such as the IP address and port to send enriched IPFIX flow diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml index b0e432eb0..d99d254dc 100644 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ b/hack/cloned.flows.netobserv.io_flowcollectors.yaml @@ -3535,7 +3535,7 @@ spec: name: Agent type: string - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) + name: Sampling (Ebpf) type: string - jsonPath: .spec.deploymentModel name: Deployment Model @@ -3563,7 +3563,7 @@ spec: description: Agent configuration for flows extraction. properties: ebpf: - description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`.' + description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `Ebpf`.' properties: cacheActiveTimeout: default: 5s @@ -3685,7 +3685,7 @@ spec: type: integer type: object ipfix: - description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' + description: '`ipfix` [deprecated (*)] - describes the settings related to the Ipfix-based flow reporter when `spec.agent.type` is set to `Ipfix`.' properties: cacheActiveTimeout: default: 20s @@ -3708,10 +3708,10 @@ spec: type: object forceSampleAll: default: false - description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' + description: '`forceSampleAll` allows disabling sampling in the Ipfix-based flow reporter. It is not recommended to sample all the traffic with Ipfix, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' type: boolean ovnKubernetes: - description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' + description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s Ipfix exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' properties: containerName: default: ovnkube-node @@ -3728,17 +3728,17 @@ spec: type: object sampling: default: 400 - description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.' + description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. 
Alternatively, you can use the eBPF Agent instead of Ipfix.' format: int32 minimum: 2 type: integer type: object type: - default: eBPF - description: '`type` selects the flows tracing agent. Possible values are:
- `eBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).' + default: Ebpf + description: '`type` selects the flows tracing agent. Possible values are:
- `Ebpf` (default) to use the NetObserv eBPF agent.&#10;
- `Ipfix` [deprecated (*)] - to use the legacy Ipfix collector.
`Ebpf` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `Ipfix` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting Ipfix, but they would require manual configuration).' enum: - - eBPF - - IPFIX + - Ebpf + - Ipfix type: string type: object consolePlugin: @@ -4242,17 +4242,17 @@ spec: description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' properties: ipfix: - description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. + description: Ipfix configuration, such as the IP address and port to send enriched Ipfix flows to. properties: targetHost: default: "" - description: Address of the IPFIX external receiver + description: Address of the Ipfix external receiver type: string targetPort: - description: Port for the IPFIX external receiver + description: Port for the Ipfix external receiver type: integer transport: - description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. + description: Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`. enum: - TCP - UDP @@ -4387,10 +4387,10 @@ spec: - topic type: object type: - description: '`type` selects the type of exporters. The available options are `Kafka` and `IPFIX`.' + description: '`type` selects the type of exporters. The available options are `Kafka` and `Ipfix`.' enum: - Kafka - - IPFIX + - Ipfix type: string required: - type diff --git a/pkg/helper/flowcollector.go b/pkg/helper/flowcollector.go index 0ddf85561..2e736675d 100644 --- a/pkg/helper/flowcollector.go +++ b/pkg/helper/flowcollector.go @@ -10,18 +10,18 @@ import ( ) func GetSampling(spec *flowslatest.FlowCollectorSpec) int { - if UseEBPF(spec) { - return int(*spec.Agent.EBPF.Sampling) + if UseEbpf(spec) { + return int(*spec.Agent.Ebpf.Sampling) } - return int(spec.Agent.IPFIX.Sampling) + return int(spec.Agent.Ipfix.Sampling) } -func UseEBPF(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentEBPF +func UseEbpf(spec *flowslatest.FlowCollectorSpec) bool { + return spec.Agent.Type == flowslatest.AgentEbpf } -func UseIPFIX(spec *flowslatest.FlowCollectorSpec) bool { - return spec.Agent.Type == flowslatest.AgentIPFIX +func UseIpfix(spec *flowslatest.FlowCollectorSpec) bool { + return spec.Agent.Type == flowslatest.AgentIpfix } func UseKafka(spec *flowslatest.FlowCollectorSpec) bool { @@ -90,7 +90,7 @@ func UseConsolePlugin(spec *flowslatest.FlowCollectorSpec) bool { (spec.ConsolePlugin.Enable == nil || *spec.ConsolePlugin.Enable) } -func IsFeatureEnabled(spec *flowslatest.FlowCollectorEBPF, feature flowslatest.AgentFeature) bool { +func IsFeatureEnabled(spec *flowslatest.FlowCollectorEbpf, feature flowslatest.AgentFeature) bool { for _, f := range spec.Features { if f == feature { return true @@ -99,22 +99,22 @@ func IsFeatureEnabled(spec *flowslatest.FlowCollectorEBPF, feature flowslatest.A return false } -func IsPrivileged(spec *flowslatest.FlowCollectorEBPF) bool { +func IsPrivileged(spec *flowslatest.FlowCollectorEbpf) bool { return spec.Privileged } -func IsPktDropEnabled(spec *flowslatest.FlowCollectorEBPF) bool { +func IsPktDropEnabled(spec *flowslatest.FlowCollectorEbpf) bool { if IsPrivileged(spec) && IsFeatureEnabled(spec, flowslatest.PacketDrop) { return true } return false } -func IsDNSTrackingEnabled(spec *flowslatest.FlowCollectorEBPF) bool { +func IsDNSTrackingEnabled(spec 
*flowslatest.FlowCollectorEbpf) bool { return IsFeatureEnabled(spec, flowslatest.DNSTracking) } -func IsFlowRTTEnabled(spec *flowslatest.FlowCollectorEBPF) bool { +func IsFlowRTTEnabled(spec *flowslatest.FlowCollectorEbpf) bool { return IsFeatureEnabled(spec, flowslatest.FlowRTT) } From 9af63d58ddc6809c24c5578691bf3e86da463a55 Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Fri, 3 Nov 2023 17:18:25 -0400 Subject: [PATCH 15/16] Revert URL changes --- .../netobserv-operator.clusterserviceversion.yaml | 8 ++++---- config/descriptions/ocp.md | 4 ++-- config/descriptions/upstream.md | 4 ++-- config/manager/kustomization.yaml | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index f953fcc63..64497faa4 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -526,7 +526,7 @@ metadata: capabilities: Seamless Upgrades categories: Monitoring, Networking console.openshift.io/plugins: '["netobserv-plugin"]' - containerImage: quay.io/amoghrd/network-observability-operator:main + containerImage: quay.io/netobserv/network-observability-operator:1.0.4 createdAt: ':created-at:' description: Network flows collector and monitoring solution operatorframework.io/suggested-namespace: openshift-netobserv-operator @@ -598,7 +598,7 @@ spec: ## Configuration - The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). + The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -616,7 +616,7 @@ spec: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. - - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). + - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). 
- Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -886,7 +886,7 @@ spec: - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS - image: quay.io/amoghrd/network-observability-operator:main + image: quay.io/netobserv/network-observability-operator:1.0.4 imagePullPolicy: Always livenessProbe: httpGet: diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md index e0e86d923..6e2e48cbc 100644 --- a/config/descriptions/ocp.md +++ b/config/descriptions/ocp.md @@ -28,7 +28,7 @@ oc apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents/252b ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -44,7 +44,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. 
diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md index 30e670499..04c610fcb 100644 --- a/config/descriptions/upstream.md +++ b/config/descriptions/upstream.md @@ -32,7 +32,7 @@ kubectl apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -50,7 +50,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. 
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index fb1512921..f69dc045d 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -14,7 +14,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/amoghrd/network-observability-operator - newTag: main + newName: quay.io/netobserv/network-observability-operator + newTag: 1.0.4 commonLabels: app: netobserv-operator From 64b8e54ced5a86b66779ee7d0cdd1df651e83876 Mon Sep 17 00:00:00 2001 From: Amoghrd Date: Fri, 3 Nov 2023 17:09:52 -0400 Subject: [PATCH 16/16] Update camelCase for other fields --- .../netobserv-operator.clusterserviceversion.yaml | 8 ++++---- config/descriptions/ocp.md | 4 ++-- config/descriptions/upstream.md | 4 ++-- config/manager/kustomization.yaml | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 64497faa4..f953fcc63 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -526,7 +526,7 @@ metadata: capabilities: Seamless Upgrades categories: Monitoring, Networking console.openshift.io/plugins: '["netobserv-plugin"]' - containerImage: quay.io/netobserv/network-observability-operator:1.0.4 + containerImage: quay.io/amoghrd/network-observability-operator:main createdAt: ':created-at:' description: Network flows collector and monitoring solution operatorframework.io/suggested-namespace: openshift-netobserv-operator @@ -598,7 +598,7 @@ spec: ## Configuration - The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). + The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -616,7 +616,7 @@ spec: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. - - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). + - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. 
They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -886,7 +886,7 @@ spec: - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS - image: quay.io/netobserv/network-observability-operator:1.0.4 + image: quay.io/amoghrd/network-observability-operator:main imagePullPolicy: Always livenessProbe: httpGet: diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md index 6e2e48cbc..e0e86d923 100644 --- a/config/descriptions/ocp.md +++ b/config/descriptions/ocp.md @@ -28,7 +28,7 @@ oc apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents/252b ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -44,7 +44,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). 
Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md index 04c610fcb..30e670499 100644 --- a/config/descriptions/upstream.md +++ b/config/descriptions/upstream.md @@ -32,7 +32,7 @@ kubectl apply -f <(curl -L https://raw.githubusercontent.com/netobserv/documents ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.0.4/config/samples/flows_v1beta1_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/main/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/main/config/samples/flows_v1beta1_flowcollector.yaml). To edit configuration in cluster, run: @@ -50,7 +50,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you may have to configure differently if you used another installation method. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.0.4/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/main/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index f69dc045d..fb1512921 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -14,7 +14,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/netobserv/network-observability-operator - newTag: 1.0.4 + newName: quay.io/amoghrd/network-observability-operator + newTag: main commonLabels: app: netobserv-operator
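The quick-filters setting also recurs in every description touched by these two patches, so a short sketch of `spec.consolePlugin.quickFilters` may help. The two filters and their namespace values below are assumptions for illustration; the trailing `!` negation and comma-separated values follow the quick-filter syntax documented in QuickFilters.md.

```yaml
apiVersion: flows.netobserv.io/v1beta1
kind: FlowCollector
metadata:
  name: cluster
spec:
  consolePlugin:
    quickFilters:
    # "default: true" preselects this filter when the Console plugin loads.
    - name: Applications
      default: true
      filter:
        # Trailing "!" negates the match; commas separate alternative values.
        src_namespace!: 'openshift-,netobserv'
        dst_namespace!: 'openshift-,netobserv'
    - name: Infrastructure
      filter:
        src_namespace: 'openshift-,netobserv'
        dst_namespace: 'openshift-,netobserv'
```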
       targetHost   string
-          Address of the IPFIX external receiver<br/>
+          Address of the Ipfix external receiver<br/>
           <br/>
           Default: <br/>
       targetPort   integer   (required: true)
-          Port for the IPFIX external receiver<br/>
+          Port for the Ipfix external receiver<br/>
       transport    enum
-          Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.<br/>
+          Transport protocol (`TCP` or `UDP`) to be used for the Ipfix connection, defaults to `TCP`.<br/>
           <br/>
           Enum: TCP, UDP<br/>
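Tying the renamed enum values to the exporter fields documented just above, a hedged sketch of an agent plus an additional Ipfix exporter could look like the following. The receiver host and port are placeholders, and pairing these PascalCase values under `v1beta2` is assumed from the patches in this series rather than stated by them.

```yaml
apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster
spec:
  agent:
    # PascalCase value introduced by this series (previously "eBPF").
    type: Ebpf
  exporters:
  - # PascalCase value introduced by this series (previously "IPFIX").
    type: Ipfix
    ipfix:
      # Placeholder receiver: point targetHost/targetPort at your collector.
      targetHost: ipfix-collector.example.com
      targetPort: 4739
      # TCP is the documented default; UDP is the other accepted value.
      transport: TCP
```

Since `transport` defaults to `TCP`, the last line could be omitted; it is kept here only to show where the enum applies.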