Support kubernetes_role argument for prometheus.operator.servicemonitors #2023

Open · wants to merge 2 commits into base: main
CHANGELOG.md (+2 -0)
@@ -24,6 +24,8 @@ Main (unreleased)

- Add support to `loki.source.api` to be able to extract the tenant from the HTTP `X-Scope-OrgID` header (@QuentinBisson)

- Add support to `prometheus.operator.servicemonitors` to allow the `endpointslice` role. (@yoyosir)

- (_Experimental_) Add a `loki.secretfilter` component to redact secrets from collected logs.

- (_Experimental_) Add a `prometheus.write.queue` component to add an alternative to `prometheus.remote_write`
@@ -32,10 +32,11 @@ prometheus.operator.servicemonitors "LABEL" {

The following arguments are supported:

Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes
`namespaces` | `list(string)` | List of namespaces to search for ServiceMonitor resources. If not specified, all namespaces will be searched. | | no
`kubernetes_role` | `string` | The Kubernetes role used for discovery. Supports `endpoints` or `endpointslice`. If not specified, the `endpoints` role is used. | | no
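
For illustration, a minimal component sketch that switches discovery to the `endpointslice` role could look like the following; the `prometheus.remote_write.default.receiver` target is a placeholder for your own receiver:

```alloy
prometheus.operator.servicemonitors "example" {
  forward_to      = [prometheus.remote_write.default.receiver]
  kubernetes_role = "endpointslice"
}
```

The `endpointslice` role can scale better than `endpoints` in large clusters, since Kubernetes shards EndpointSlice objects for Services with many endpoints.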

## Blocks

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
promk8s "github.com/prometheus/prometheus/discovery/kubernetes"
"sort"
"strings"
"sync"
@@ -469,7 +470,11 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) {
// Default to the endpoints role when kubernetes_role is not specified.
role := promk8s.Role(c.args.KubernetesRole)
if role == "" {
role = promk8s.RoleEndpoint
}
mapKeys := []string{}
for i, ep := range sm.Spec.Endpoints {
var scrapeConfig *config.ScrapeConfig
scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i, role)
if err != nil {
// TODO(jcreixell): Generate Kubernetes event to inform of this error when running `kubectl get <servicemonitor>`.
level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error generating scrapeconfig from serviceMonitor")
@@ -16,15 +16,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonitor, ep promopv1.Endpoint, i int, role promk8s.Role) (cfg *config.ScrapeConfig, err error) {
cfg = cg.generateDefaultScrapeConfig()

cfg.JobName = fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i)
cfg.HonorLabels = ep.HonorLabels
if ep.HonorTimestamps != nil {
cfg.HonorTimestamps = *ep.HonorTimestamps
}
dConfig := cg.generateK8SSDConfig(m.Spec.NamespaceSelector, m.Namespace, role, m.Spec.AttachMetadata)
cfg.ServiceDiscoveryConfigs = append(cfg.ServiceDiscoveryConfigs, dConfig)

if ep.Interval != "" {
@@ -153,14 +153,18 @@ func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonit
}
}

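// The meta label that carries the port name differs between discovery roles:
// the endpoints role exposes __meta_kubernetes_endpoint_port_name, while the
// endpointslice role exposes __meta_kubernetes_endpointslice_port_name.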
labelPortName := "__meta_kubernetes_endpoint_port_name"
if role == promk8s.RoleEndpointSlice {
labelPortName = "__meta_kubernetes_endpointslice_port_name"
}
// Filter targets based on correct port for the endpoint.
if ep.Port != "" {
regex, err := relabel.NewRegexp(ep.Port)
if err != nil {
return nil, fmt.Errorf("parsing Port as regex: %w", err)
}
relabels.add(&relabel.Config{
SourceLabels: model.LabelNames{model.LabelName(labelPortName)},
Action: "keep",
Regex: regex,
})
@@ -191,6 +195,9 @@ func (cg *ConfigGenerator) GenerateServiceMonitorConfig(m *promopv1.ServiceMonit
}

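// Likewise, the address-target meta labels are prefixed per role
// (__meta_kubernetes_endpoint_* vs. __meta_kubernetes_endpointslice_*).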
sourceLabels := model.LabelNames{"__meta_kubernetes_endpoint_address_target_kind", "__meta_kubernetes_endpoint_address_target_name"}
if role == promk8s.RoleEndpointSlice {
sourceLabels = model.LabelNames{"__meta_kubernetes_endpointslice_address_target_kind", "__meta_kubernetes_endpointslice_address_target_name"}
}
// Relabel namespace and pod and service labels into proper labels.
// Relabel node labels with meta labels available with Prometheus >= v2.3.
relabels.add(&relabel.Config{
@@ -32,6 +32,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
name string
m *promopv1.ServiceMonitor
ep promopv1.Endpoint
role promk8s.Role
expectedRelabels string
expectedMetricRelabels string
expected *config.ScrapeConfig
@@ -44,7 +45,8 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
Name: "svcmonitor",
},
},
ep: promopv1.Endpoint{},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
@@ -110,6 +112,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
ep: promopv1.Endpoint{
TargetPort: &intstr.IntOrString{StrVal: "http_metrics", Type: intstr.String},
},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
@@ -180,6 +183,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
ep: promopv1.Endpoint{
TargetPort: &intstr.IntOrString{IntVal: 4242, Type: intstr.Int},
},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
@@ -239,6 +243,77 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
},
},
},
{
name: "role_endpointslice",
m: &promopv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Namespace: "operator",
Name: "svcmonitor",
},
},
ep: promopv1.Endpoint{
TargetPort: &intstr.IntOrString{IntVal: 4242, Type: intstr.Int},
},
role: promk8s.RoleEndpointSlice,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
- source_labels: [job]
target_label: __tmp_prometheus_job_name
- source_labels: ["__meta_kubernetes_pod_container_port_number"]
regex: "4242"
action: "keep"
- source_labels: [__meta_kubernetes_endpointslice_address_target_kind, __meta_kubernetes_endpointslice_address_target_name]
regex: Node;(.*)
target_label: node
replacement: ${1}
- source_labels: [__meta_kubernetes_endpointslice_address_target_kind, __meta_kubernetes_endpointslice_address_target_name]
regex: Pod;(.*)
target_label: pod
action: replace
replacement: ${1}
- source_labels: [__meta_kubernetes_namespace]
target_label: namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: service
- source_labels: [__meta_kubernetes_pod_container_name]
target_label: container
- source_labels: [__meta_kubernetes_pod_name]
target_label: pod
- source_labels: [__meta_kubernetes_pod_phase]
regex: (Failed|Succeeded)
action: drop
- source_labels: [__meta_kubernetes_service_name]
target_label: job
replacement: ${1}
- target_label: endpoint
replacement: "4242"
`),
expected: &config.ScrapeConfig{
JobName: "serviceMonitor/operator/svcmonitor/1",
HonorTimestamps: true,
ScrapeInterval: model.Duration(time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeProtocols: config.DefaultScrapeProtocols,
EnableCompression: true,
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: commonConfig.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: discovery.Configs{
&promk8s.SDConfig{
Role: "endpointslice",
NamespaceDiscovery: promk8s.NamespaceDiscovery{
IncludeOwnNamespace: false,
Names: []string{"operator"},
},
},
},
},
},
{
name: "everything",
m: &promopv1.ServiceMonitor{
@@ -308,6 +383,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
},
},
},
role: promk8s.RoleEndpoint,
expectedRelabels: util.Untab(`
- target_label: __meta_foo
replacement: bar
@@ -427,7 +503,7 @@ func TestGenerateServiceMonitorConfig(t *testing.T) {
{TargetLabel: "__meta_foo", Replacement: "bar"},
},
}
cfg, err := cg.GenerateServiceMonitorConfig(tc.m, tc.ep, 1, tc.role)
require.NoError(t, err)
// check relabel configs separately
rlcs := cfg.RelabelConfigs
internal/component/prometheus/operator/types.go (+2 -0)
@@ -24,6 +24,8 @@ type Arguments struct {
// Namespaces to search for monitor resources. Empty implies All namespaces
Namespaces []string `alloy:"namespaces,attr,optional"`

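// KubernetesRole is the Kubernetes role used for discovery. Supports
// endpoints or endpointslice. Empty implies endpoints.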
KubernetesRole string `alloy:"kubernetes_role,attr,optional"`

// LabelSelector allows filtering discovered monitor resources by labels
LabelSelector *config.LabelSelector `alloy:"selector,block,optional"`
