diff --git a/charts/feature-auto-instrumentation/.helmignore b/charts/feature-auto-instrumentation/.helmignore new file mode 100644 index 000000000..2b29eaf56 --- /dev/null +++ b/charts/feature-auto-instrumentation/.helmignore @@ -0,0 +1,6 @@ +docs +schema-mods +tests +Makefile +README.md +README.md.gotmpl diff --git a/charts/feature-auto-instrumentation/.updatecli-beyla.yaml b/charts/feature-auto-instrumentation/.updatecli-beyla.yaml new file mode 100644 index 000000000..dd851218f --- /dev/null +++ b/charts/feature-auto-instrumentation/.updatecli-beyla.yaml @@ -0,0 +1,31 @@ +--- +name: Update dependency "beyla" for Helm chart "feature-auto-instrumentation" +sources: + beyla: + name: Get latest "beyla" Helm chart version + kind: helmchart + spec: + name: beyla + url: https://grafana.github.io/helm-charts + versionfilter: + kind: semver + pattern: '*' +conditions: + beyla: + name: Ensure Helm chart dependency "beyla" is specified + kind: yaml + spec: + file: charts/feature-auto-instrumentation/Chart.yaml + key: $.dependencies[0].name + value: beyla + disablesourceinput: true +targets: + beyla: + name: Bump Helm chart dependency "beyla" for Helm chart "feature-auto-instrumentation" + kind: helmchart + spec: + file: Chart.yaml + key: $.dependencies[0].version + name: charts/feature-auto-instrumentation + versionincrement: none + sourceid: beyla diff --git a/charts/feature-auto-instrumentation/Chart.lock b/charts/feature-auto-instrumentation/Chart.lock new file mode 100644 index 000000000..dc52ab56c --- /dev/null +++ b/charts/feature-auto-instrumentation/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: beyla + repository: https://grafana.github.io/helm-charts + version: 1.4.11 +digest: sha256:9b033f8958e90acc3d4d51f14cadf332be42b9c83a62a1492d2db421f946c650 +generated: "2024-11-12T16:42:11.373589-07:00" diff --git a/charts/feature-auto-instrumentation/Chart.yaml b/charts/feature-auto-instrumentation/Chart.yaml new file mode 100644 index 000000000..583884009 --- 
/dev/null +++ b/charts/feature-auto-instrumentation/Chart.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v2 +name: k8s-monitoring-feature-auto-instrumentation +description: Gathers telemetry data via automatic instrumentation +icon: https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg +sources: + - https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-auto-instrumentation +version: 1.0.0 +appVersion: 1.0.0 +maintainers: + - email: pete.wall@grafana.com + name: petewall +dependencies: + - name: beyla + version: 1.4.11 + repository: https://grafana.github.io/helm-charts + condition: beyla.enabled diff --git a/charts/feature-auto-instrumentation/Makefile b/charts/feature-auto-instrumentation/Makefile new file mode 100644 index 000000000..de2ac62c1 --- /dev/null +++ b/charts/feature-auto-instrumentation/Makefile @@ -0,0 +1,40 @@ +HAS_HELM_DOCS := $(shell command -v helm-docs;) +HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) +UPDATECLI_FILES := $(shell yq -e '.dependencies[] | select(.repository == "http*") | ".updatecli-" + .name + ".yaml"' Chart.yaml 2>/dev/null | sort | uniq) + +.SECONDEXPANSION: +README.md: values.yaml Chart.yaml $$(wildcard README.md.gotmpl) +ifdef HAS_HELM_DOCS + helm-docs +else + docker run --rm --volume "$(shell pwd):/helm-docs" -u $(shell id -u) jnorwood/helm-docs:latest +endif + +Chart.lock: Chart.yaml + helm dependency update . + @touch Chart.lock # Ensure the timestamp is updated + +values.schema.json: values.yaml $$(wildcard schema-mods/*) + ../../scripts/schema-gen.sh . + +.updatecli-%.yaml: Chart.yaml + ../../scripts/charts-to-updatecli.sh Chart.yaml + +.PHONY: clean +clean: + rm -f README.md values.schema.json $(UPDATECLI_FILES) + +.PHONY: build +build: README.md Chart.lock values.schema.json $(UPDATECLI_FILES) + +.PHONY: test +test: build + helm repo add grafana https://grafana.github.io/helm-charts + + helm lint . 
+ ct lint --lint-conf ../../.configs/lintconf.yaml --helm-dependency-extra-args=--skip-refresh --charts . +ifdef HAS_HELM_UNITTEST + helm unittest . +else + docker run --rm --volume $(shell pwd):/apps helmunittest/helm-unittest . +endif diff --git a/charts/feature-auto-instrumentation/README.md b/charts/feature-auto-instrumentation/README.md new file mode 100644 index 000000000..1958fb7f0 --- /dev/null +++ b/charts/feature-auto-instrumentation/README.md @@ -0,0 +1,73 @@ + + +# k8s-monitoring-feature-auto-instrumentation + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) +Gathers telemetry data via automatic instrumentation + +The auto-instrumentation feature deploys Grafana Beyla to automatically instrument programs running on this cluster +using eBPF. + +## Testing + +This chart contains unit tests to verify the generated configuration. A hidden value, `deployAsConfigMap`, will render +the generated configuration into a ConfigMap object. This ConfigMap is not used during regular operation, but it is +useful for showing the outcome of a given values file. + +The unit tests use this to create an object with the configuration that can be asserted against. To run the tests, use +`helm test`. + +Actual integration testing in a live environment should be done in the main [k8s-monitoring](../k8s-monitoring) chart. + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| petewall | | | + + +## Source Code + +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://grafana.github.io/helm-charts | beyla | 1.4.11 | + + + +## Values + +### Beyla + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| beyla.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for Beyla. 
These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| beyla.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Beyla. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present. | +| beyla.labelMatchers | object | `{"app.kubernetes.io/name":"beyla"}` | Label matchers used to select the Beyla pods for scraping metrics. | +| beyla.maxCacheSize | string | 100000 | Sets the max_cache_size for the prometheus.relabel component for Beyla. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | +| beyla.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| beyla.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| beyla.preset | string | `"application"` | The configuration preset to use. Valid options are "application" or "network". | +| beyla.scrapeInterval | string | 60s | How frequently to scrape metrics from Beyla. Overrides metrics.scrapeInterval | +| beyla.service | object | `{"targetPort":9090}` | The port number for the Beyla service. 
| + +### General settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| fullnameOverride | string | `""` | Full name override | +| nameOverride | string | `""` | Name override | + +### Global Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.maxCacheSize | int | `100000` | Sets the max_cache_size for every prometheus.relabel component. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) This should be at least 2x-5x your largest scrape target or samples appended rate. | +| global.scrapeInterval | string | `"60s"` | How frequently to scrape metrics. | + diff --git a/charts/feature-auto-instrumentation/README.md.gotmpl b/charts/feature-auto-instrumentation/README.md.gotmpl new file mode 100644 index 000000000..3998ebb0a --- /dev/null +++ b/charts/feature-auto-instrumentation/README.md.gotmpl @@ -0,0 +1,36 @@ + + +{{ template "chart.header" . }} +{{ template "chart.deprecationWarning" . }} +{{ template "chart.badgesSection" . }} +{{ template "chart.description" . }} +{{ template "chart.homepageLine" . }} + +The auto-instrumentation feature deploys Grafana Beyla to automatically instrument programs running on this cluster +using eBPF. + +## Testing + +This chart contains unit tests to verify the generated configuration. A hidden value, `deployAsConfigMap`, will render +the generated configuration into a ConfigMap object. This ConfigMap is not used during regular operation, but it is +useful for showing the outcome of a given values file. + +The unit tests use this to create an object with the configuration that can be asserted against. To run the tests, use +`helm test`. + +Actual integration testing in a live environment should be done in the main [k8s-monitoring](../k8s-monitoring) chart. + +{{ template "chart.maintainersSection" . }} + + +{{ template "chart.sourcesSection" . }} + +{{ template "chart.requirementsSection" . 
}} + + + +{{ template "chart.valuesSection" . }} + diff --git a/charts/feature-auto-instrumentation/charts/beyla-1.4.11.tgz b/charts/feature-auto-instrumentation/charts/beyla-1.4.11.tgz new file mode 100644 index 000000000..a896b70e3 Binary files /dev/null and b/charts/feature-auto-instrumentation/charts/beyla-1.4.11.tgz differ diff --git a/charts/feature-auto-instrumentation/schema-mods/types-and-enums.json b/charts/feature-auto-instrumentation/schema-mods/types-and-enums.json new file mode 100644 index 000000000..374770d91 --- /dev/null +++ b/charts/feature-auto-instrumentation/schema-mods/types-and-enums.json @@ -0,0 +1,5 @@ +{ + "properties": { + "beyla": {"properties": {"preset": {"enum": ["application", "network"]}}} + } +} diff --git a/charts/feature-auto-instrumentation/templates/_helpers.tpl b/charts/feature-auto-instrumentation/templates/_helpers.tpl new file mode 100644 index 000000000..a37170c82 --- /dev/null +++ b/charts/feature-auto-instrumentation/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* +Create a default fully qualified name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "feature.autoInstrumentation.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride | lower }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "escape_annotation" -}} +{{ . | replace "-" "_" | replace "." "_" | replace "/" "_" }} +{{- end }} + +{{- define "pod_annotation" -}} +{{ printf "__meta_kubernetes_pod_annotation_%s" (include "escape_annotation" .) 
}} +{{- end }} + +{{- define "service_annotation" -}} +{{ printf "__meta_kubernetes_service_annotation_%s" (include "escape_annotation" .) }} +{{- end }} diff --git a/charts/feature-auto-instrumentation/templates/_module.alloy.tpl b/charts/feature-auto-instrumentation/templates/_module.alloy.tpl new file mode 100644 index 000000000..8a12a9a48 --- /dev/null +++ b/charts/feature-auto-instrumentation/templates/_module.alloy.tpl @@ -0,0 +1,89 @@ +{{- define "feature.autoInstrumentation.module" }} +{{- $metricAllowList := .Values.beyla.metricsTuning.includeMetrics }} +{{- $metricDenyList := .Values.beyla.metricsTuning.excludeMetrics }} +{{- $labelSelectors := list }} +{{- range $k, $v := .Values.beyla.labelMatchers }} +{{- $labelSelectors = append $labelSelectors (printf "%s=%s" $k $v) }} +{{- end }} +declare "auto_instrumentation" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "beyla_pods" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = {{ $labelSelectors | join "," | quote }} + } + } + + discovery.relabel "beyla_pods" { + targets = discovery.kubernetes.beyla_pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } + +{{- if .Values.beyla.extraDiscoveryRules }} +{{ .Values.beyla.extraDiscoveryRules | indent 4 }} +{{- end }} + } + + prometheus.scrape "beyla_applications" { + targets = discovery.relabel.beyla_pods.output + honor_labels = true + scrape_interval = {{ .Values.beyla.scrapeInterval | default .Values.global.scrapeInterval | quote }} + clustering { + enabled = true + } +{{- if or $metricAllowList $metricDenyList .Values.beyla.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.beyla.receiver] +{{- else }} + forward_to = argument.metrics_destinations.value +{{- end }} + } + + prometheus.scrape "beyla_internal" { 
+ targets = discovery.relabel.beyla_pods.output + metrics_path = "/internal/metrics" + job_name = "integrations/beyla" + honor_labels = true + scrape_interval = {{ .Values.beyla.scrapeInterval | default .Values.global.scrapeInterval | quote }} + clustering { + enabled = true + } +{{- if or $metricAllowList $metricDenyList .Values.beyla.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.beyla.receiver] + } + +prometheus.relabel "beyla" { + max_cache_size = {{ .Values.beyla.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if $metricAllowList }} + rule { + source_labels = ["__name__"] + regex = "up|{{ $metricAllowList | join "|" }}" + action = "keep" + } +{{- end }} +{{- if $metricDenyList }} + rule { + source_labels = ["__name__"] + regex = {{ $metricDenyList | join "|" | quote }} + action = "drop" + } +{{- end }} +{{- if .Values.beyla.extraMetricProcessingRules }} +{{ .Values.beyla.extraMetricProcessingRules | indent 4 }} +{{- end }} +{{- end }} + forward_to = argument.metrics_destinations.value + } +} +{{- end -}} + +{{- define "feature.autoInstrumentation.alloyModules" }}{{- end }} diff --git a/charts/feature-auto-instrumentation/templates/_notes.tpl b/charts/feature-auto-instrumentation/templates/_notes.tpl new file mode 100644 index 000000000..84e380000 --- /dev/null +++ b/charts/feature-auto-instrumentation/templates/_notes.tpl @@ -0,0 +1,13 @@ +{{- define "feature.autoInstrumentation.notes.deployments" }} +* Grafana Beyla (Daemonset) +{{- end }} + +{{- define "feature.autoInstrumentation.notes.task" }} +Automatically instrument applications and services running in the cluster with Grafana Beyla +{{- end }} + +{{- define "feature.autoInstrumentation.notes.actions" }}{{- end }} + +{{- define "feature.autoInstrumentation.summary" -}} +version: {{ .Chart.Version }} +{{- end }} diff --git a/charts/feature-auto-instrumentation/templates/configmap.yaml b/charts/feature-auto-instrumentation/templates/configmap.yaml new file mode 100644 
index 000000000..23e8781bc --- /dev/null +++ b/charts/feature-auto-instrumentation/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.deployAsConfigMap }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "feature.autoInstrumentation.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + module.alloy: |- + {{- include "feature.autoInstrumentation.module" . | indent 4 }} +{{- end }} diff --git a/charts/feature-auto-instrumentation/tests/__snapshot__/.gitkeep b/charts/feature-auto-instrumentation/tests/__snapshot__/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/charts/feature-auto-instrumentation/tests/default_test.yaml b/charts/feature-auto-instrumentation/tests/default_test.yaml new file mode 100644 index 000000000..e094c05c2 --- /dev/null +++ b/charts/feature-auto-instrumentation/tests/default_test.yaml @@ -0,0 +1,61 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test default values +templates: + - configmap.yaml +tests: + - it: creates a module with default Beyla configuration + set: + deployAsConfigMap: true + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "auto_instrumentation" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "beyla_pods" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = "app.kubernetes.io/name=beyla" + } + } + + discovery.relabel "beyla_pods" { + targets = discovery.kubernetes.beyla_pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } + } + + prometheus.scrape "beyla_applications" { + targets = discovery.relabel.beyla_pods.output + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = 
argument.metrics_destinations.value + } + + prometheus.scrape "beyla_internal" { + targets = discovery.relabel.beyla_pods.output + metrics_path = "/internal/metrics" + job_name = "integrations/beyla" + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + } diff --git a/charts/feature-auto-instrumentation/values.schema.json b/charts/feature-auto-instrumentation/values.schema.json new file mode 100644 index 000000000..02b77e63e --- /dev/null +++ b/charts/feature-auto-instrumentation/values.schema.json @@ -0,0 +1,104 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "beyla": { + "type": "object", + "properties": { + "config": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "skipConfigMapCheck": { + "type": "boolean" + } + } + }, + "extraDiscoveryRules": { + "type": "string" + }, + "extraMetricProcessingRules": { + "type": "string" + }, + "labelMatchers": { + "type": "object", + "properties": { + "app.kubernetes.io/name": { + "type": "string" + } + } + }, + "maxCacheSize": { + "type": "null" + }, + "metricsTuning": { + "type": "object", + "properties": { + "excludeMetrics": { + "type": "array" + }, + "includeMetrics": { + "type": "array" + } + } + }, + "nodeSelector": { + "type": "object", + "properties": { + "kubernetes.io/os": { + "type": "string" + } + } + }, + "podAnnotations": { + "type": "object", + "properties": { + "k8s.grafana.com/logs.job": { + "type": "string" + } + } + }, + "preset": { + "type": "string", + "enum": [ + "application", + "network" + ] + }, + "scrapeInterval": { + "type": "string" + }, + "service": { + "type": "object", + "properties": { + "targetPort": { + "type": "integer" + } + } + } + } + }, + "deployAsConfigMap": { + "type": "boolean" + }, + "fullnameOverride": { + "type": "string" + }, + "global": { + "type": "object", + "properties": { + "maxCacheSize": { + "type": "integer" + 
}, + "scrapeInterval": { + "type": "string" + } + } + }, + "nameOverride": { + "type": "string" + } + } +} diff --git a/charts/feature-auto-instrumentation/values.yaml b/charts/feature-auto-instrumentation/values.yaml new file mode 100644 index 000000000..ce892db32 --- /dev/null +++ b/charts/feature-auto-instrumentation/values.yaml @@ -0,0 +1,87 @@ +--- +# -- Name override +# @section -- General settings +nameOverride: "" + +# -- Full name override +# @section -- General settings +fullnameOverride: "" + +global: + # -- How frequently to scrape metrics. + # @section -- Global Settings + scrapeInterval: 60s + + # -- Sets the max_cache_size for every prometheus.relabel component. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) + # This should be at least 2x-5x your largest scrape target or samples appended rate. + # @section -- Global Settings + maxCacheSize: 100000 + +beyla: + # -- The configuration preset to use. Valid options are "application" or "network". + # @section -- Beyla + preset: application + + # -- How frequently to scrape metrics from Beyla. + # Overrides metrics.scrapeInterval + # @default -- 60s + # @section -- Beyla + scrapeInterval: "" + + # -- Label matchers used to select the Beyla pods for scraping metrics. + # @section -- Beyla + labelMatchers: + app.kubernetes.io/name: beyla + + # -- Rule blocks to be added to the discovery.relabel component for Beyla. + # These relabeling rules are applied pre-scrape against the targets from service discovery. + # Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. + # ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) + # @section -- Beyla + extraDiscoveryRules: "" + + # -- Rule blocks to be added to the prometheus.relabel component for Beyla. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) + # These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present. + # @section -- Beyla + extraMetricProcessingRules: "" + + # Adjustments to the scraped metrics to filter the amount of data sent to storage. + # @section -- Beyla + metricsTuning: + # -- Metrics to keep. Can use regular expressions. + # @section -- Beyla + includeMetrics: [] + # -- Metrics to drop. Can use regular expressions. + # @section -- Beyla + excludeMetrics: [] + + # -- Sets the max_cache_size for the prometheus.relabel component for Beyla. + # This should be at least 2x-5x your largest scrape target or samples appended rate. + # ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) + # Overrides metrics.maxCacheSize + # @default -- 100000 + # @section -- Beyla + maxCacheSize: + + # @ignored + config: + # @ignored -- This allows this chart to create the ConfigMap while also keeping the default name + skipConfigMapCheck: true + # @ignored -- This allows this chart to create the Beyla ConfigMap with required modifications + create: false + + # -- The port number for the Beyla service. 
+ # @section -- Beyla + service: + targetPort: 9090 + + # @ignored + podAnnotations: + k8s.grafana.com/logs.job: integrations/beyla + + # @ignored -- Beyla can only install to Linux nodes + nodeSelector: + kubernetes.io/os: linux + +# @ignore +deployAsConfigMap: false diff --git a/charts/feature-cluster-metrics/tests/control_plane_test.yaml b/charts/feature-cluster-metrics/tests/control_plane_test.yaml index 8f0b12d2b..c4ae3b313 100644 --- a/charts/feature-cluster-metrics/tests/control_plane_test.yaml +++ b/charts/feature-cluster-metrics/tests/control_plane_test.yaml @@ -335,5 +335,3 @@ tests: forward_to = argument.metrics_destinations.value } } - - diff --git a/charts/k8s-monitoring/.updatecli-alloy.yaml b/charts/k8s-monitoring/.updatecli-alloy.yaml index 6f0b52251..b94001f48 100644 --- a/charts/k8s-monitoring/.updatecli-alloy.yaml +++ b/charts/k8s-monitoring/.updatecli-alloy.yaml @@ -16,7 +16,7 @@ conditions: kind: yaml spec: file: charts/k8s-monitoring/Chart.yaml - key: $.dependencies[8].name + key: $.dependencies[9].name value: alloy disablesourceinput: true targets: @@ -25,7 +25,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[8].version + key: $.dependencies[9].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -34,7 +34,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[9].version + key: $.dependencies[10].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -43,7 +43,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[10].version + key: $.dependencies[11].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -52,7 +52,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[11].version + key: $.dependencies[12].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -61,7 +61,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[12].version + key: 
$.dependencies[13].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy diff --git a/charts/k8s-monitoring/Chart.lock b/charts/k8s-monitoring/Chart.lock index 2f6d724c9..f5fddcb33 100644 --- a/charts/k8s-monitoring/Chart.lock +++ b/charts/k8s-monitoring/Chart.lock @@ -5,6 +5,9 @@ dependencies: - name: k8s-monitoring-feature-application-observability repository: file://../feature-application-observability version: 1.0.0 +- name: k8s-monitoring-feature-auto-instrumentation + repository: file://../feature-auto-instrumentation + version: 1.0.0 - name: k8s-monitoring-feature-cluster-events repository: file://../feature-cluster-events version: 1.0.0 @@ -38,5 +41,5 @@ dependencies: - name: alloy repository: https://grafana.github.io/helm-charts version: 0.9.2 -digest: sha256:f5738b270a715d0fd122f5db19a928aceb4470a21314366cd91b8535fbcdbbee -generated: "2024-11-12T09:09:17.466562-07:00" +digest: sha256:38b7f53f9f9c9238b59fd442534e911368ed199125db7d48a90fc9bdbbd30c2a +generated: "2024-11-12T16:45:53.421112-07:00" diff --git a/charts/k8s-monitoring/Chart.yaml b/charts/k8s-monitoring/Chart.yaml index 27cf6c56c..dea3df410 100644 --- a/charts/k8s-monitoring/Chart.yaml +++ b/charts/k8s-monitoring/Chart.yaml @@ -22,6 +22,11 @@ dependencies: repository: file://../feature-application-observability version: 1.0.0 condition: applicationObservability.enabled + - alias: autoInstrumentation + name: k8s-monitoring-feature-auto-instrumentation + repository: file://../feature-auto-instrumentation + version: 1.0.0 + condition: autoInstrumentation.enabled - alias: clusterEvents name: k8s-monitoring-feature-cluster-events repository: file://../feature-cluster-events diff --git a/charts/k8s-monitoring/README.md b/charts/k8s-monitoring/README.md index de493984a..39d135d98 100644 --- a/charts/k8s-monitoring/README.md +++ b/charts/k8s-monitoring/README.md @@ -125,6 +125,7 @@ podLogs: |------------|------|---------| | file://../feature-annotation-autodiscovery | 
annotationAutodiscovery(k8s-monitoring-feature-annotation-autodiscovery) | 1.0.0 | | file://../feature-application-observability | applicationObservability(k8s-monitoring-feature-application-observability) | 1.0.0 | +| file://../feature-auto-instrumentation | autoInstrumentation(k8s-monitoring-feature-auto-instrumentation) | 1.0.0 | | file://../feature-cluster-events | clusterEvents(k8s-monitoring-feature-cluster-events) | 1.0.0 | | file://../feature-cluster-metrics | clusterMetrics(k8s-monitoring-feature-cluster-metrics) | 1.0.0 | | file://../feature-integrations | integrations(k8s-monitoring-feature-integrations) | 1.0.0 | @@ -282,6 +283,14 @@ podLogs: | applicationObservability.destinations | list | `[]` | The destinations where application data will be sent. If empty, all capable destinations will be used. | | applicationObservability.enabled | bool | `false` | Enable gathering Kubernetes Pod logs. | +### Features - Auto-Instrumentation + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| autoInstrumentation | object | Disabled | Auto-Instrumentation. Requires destinations that supports metrics, logs, and traces. To see the valid options, please see the [Application Observability feature documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-application-observability). | +| autoInstrumentation.destinations | list | `[]` | The destinations where application data will be sent. If empty, all capable destinations will be used. | +| autoInstrumentation.enabled | bool | `false` | Enable gathering Kubernetes Pod logs. 
| + ### Cluster | Key | Type | Default | Description | diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz index f350c7069..f4ad42a92 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz index 43a806281..3481ef5a2 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz new file mode 100644 index 000000000..6f79fa20b Binary files /dev/null and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz index 0bef42a76..3bdcd5054 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz index a6a006735..a10f6fd0e 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz and 
b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz index e57a7e8b3..a17371de0 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz index 016d3d13f..7e52b6555 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz index 2f1a6473c..4198a840a 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz index 84f432c86..dfd2a6364 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/README.md b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/README.md new file mode 100644 index 000000000..47ca4625d --- /dev/null +++ 
b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/README.md @@ -0,0 +1,42 @@ + +# Example: features/auto-instrumentation/beyla-metrics-and-traces/values.yaml + +## Values + +```yaml +--- +cluster: + name: annotation-autodiscovery-with-traces-cluster + +destinations: + - name: otlp-gateway + type: otlp + url: http://otlp-gateway.example.com + metrics: {enabled: true} + logs: {enabled: true} + traces: {enabled: true} + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +autoInstrumentation: + enabled: true + +alloy-metrics: + enabled: true + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +``` diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy new file mode 100644 index 000000000..aa1e8e03f --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy @@ -0,0 +1,150 @@ +// Destination: otlp-gateway (otlp) +otelcol.receiver.prometheus "otlp_gateway" { + output { + metrics = [otelcol.processor.transform.otlp_gateway.input] + } +} +otelcol.receiver.loki "otlp_gateway" { + output { + logs = [otelcol.processor.transform.otlp_gateway.input] + } +} + +otelcol.processor.transform "otlp_gateway" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + log_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], 
\"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + trace_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + + output { + metrics = [otelcol.exporter.otlp.otlp_gateway.input] + logs = [otelcol.exporter.otlp.otlp_gateway.input] + traces = [otelcol.exporter.otlp.otlp_gateway.input] + } +} +otelcol.exporter.otlp "otlp_gateway" { + client { + endpoint = "http://otlp-gateway.example.com" + headers = { + } + tls { + insecure = false + insecure_skip_verify = false + } + } +} + +// Feature: Auto-Instrumentation +declare "auto_instrumentation" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "beyla_pods" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = "app.kubernetes.io/name=beyla" + } + } + + discovery.relabel "beyla_pods" { + targets = discovery.kubernetes.beyla_pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } + } + + prometheus.scrape "beyla_applications" { + targets = discovery.relabel.beyla_pods.output + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + + prometheus.scrape "beyla_internal" { + targets = discovery.relabel.beyla_pods.output + metrics_path = "/internal/metrics" + job_name = "integrations/beyla" + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } 
+ forward_to = argument.metrics_destinations.value + } +} +auto_instrumentation "feature" { + metrics_destinations = [ + otelcol.receiver.prometheus.otlp_gateway.receiver, + ] +} + +// Self Reporting +prometheus.exporter.unix "kubernetes_monitoring_telemetry" { + set_collectors = ["textfile"] + textfile { + directory = "/etc/alloy" + } +} + +discovery.relabel "kubernetes_monitoring_telemetry" { + targets = prometheus.exporter.unix.kubernetes_monitoring_telemetry.targets + rule { + target_label = "instance" + action = "replace" + replacement = "ko" + } + rule { + target_label = "job" + action = "replace" + replacement = "integrations/kubernetes/kubernetes_monitoring_telemetry" + } +} + +prometheus.scrape "kubernetes_monitoring_telemetry" { + job_name = "integrations/kubernetes/kubernetes_monitoring_telemetry" + targets = discovery.relabel.kubernetes_monitoring_telemetry.output + scrape_interval = "1h" + clustering { + enabled = true + } + forward_to = [prometheus.relabel.kubernetes_monitoring_telemetry.receiver] +} + +prometheus.relabel "kubernetes_monitoring_telemetry" { + rule { + source_labels = ["__name__"] + regex = "grafana_kubernetes_monitoring_.*" + action = "keep" + } + forward_to = [ + otelcol.receiver.prometheus.otlp_gateway.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-receiver.alloy new file mode 100644 index 000000000..b02894951 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-receiver.alloy @@ -0,0 +1,164 @@ +// Destination: otlp-gateway (otlp) +otelcol.receiver.prometheus "otlp_gateway" { + output { + metrics = [otelcol.processor.transform.otlp_gateway.input] + } +} +otelcol.receiver.loki "otlp_gateway" { + output { + logs = [otelcol.processor.transform.otlp_gateway.input] + } +} + 
+otelcol.processor.transform "otlp_gateway" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + log_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + trace_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + + output { + metrics = [otelcol.exporter.otlp.otlp_gateway.input] + logs = [otelcol.exporter.otlp.otlp_gateway.input] + traces = [otelcol.exporter.otlp.otlp_gateway.input] + } +} +otelcol.exporter.otlp "otlp_gateway" { + client { + endpoint = "http://otlp-gateway.example.com" + headers = { + } + tls { + insecure = false + insecure_skip_verify = false + } + } +} + +// Feature: Application Observability +declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + 
grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = 
[otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } +} +application_observability "feature" { + metrics_destinations = [ + otelcol.processor.transform.otlp_gateway.input, + ] + logs_destinations = [ + otelcol.processor.transform.otlp_gateway.input, + ] + traces_destinations = [ + otelcol.processor.transform.otlp_gateway.input, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml new file mode 100644 index 000000000..934ca6c25 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml @@ -0,0 +1,1094 @@ +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-metrics + namespace: default + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-receiver + namespace: default + labels: + helm.sh/chart: alloy-receiver-0.9.2 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: 
k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-beyla + namespace: default + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: rbac +automountServiceAccountToken: true +--- +# Source: k8s-monitoring/templates/alloy-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-metrics + namespace: default +data: + config.alloy: |- + // Destination: otlp-gateway (otlp) + otelcol.receiver.prometheus "otlp_gateway" { + output { + metrics = [otelcol.processor.transform.otlp_gateway.input] + } + } + otelcol.receiver.loki "otlp_gateway" { + output { + logs = [otelcol.processor.transform.otlp_gateway.input] + } + } + + otelcol.processor.transform "otlp_gateway" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + log_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + trace_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + + output { + metrics = 
[otelcol.exporter.otlp.otlp_gateway.input] + logs = [otelcol.exporter.otlp.otlp_gateway.input] + traces = [otelcol.exporter.otlp.otlp_gateway.input] + } + } + otelcol.exporter.otlp "otlp_gateway" { + client { + endpoint = "http://otlp-gateway.example.com" + headers = { + } + tls { + insecure = false + insecure_skip_verify = false + } + } + } + + // Feature: Auto-Instrumentation + declare "auto_instrumentation" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "beyla_pods" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = "app.kubernetes.io/name=beyla" + } + } + + discovery.relabel "beyla_pods" { + targets = discovery.kubernetes.beyla_pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } + } + + prometheus.scrape "beyla_applications" { + targets = discovery.relabel.beyla_pods.output + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + + prometheus.scrape "beyla_internal" { + targets = discovery.relabel.beyla_pods.output + metrics_path = "/internal/metrics" + job_name = "integrations/beyla" + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + } + auto_instrumentation "feature" { + metrics_destinations = [ + otelcol.receiver.prometheus.otlp_gateway.receiver, + ] + } + + // Self Reporting + prometheus.exporter.unix "kubernetes_monitoring_telemetry" { + set_collectors = ["textfile"] + textfile { + directory = "/etc/alloy" + } + } + + discovery.relabel "kubernetes_monitoring_telemetry" { + targets = prometheus.exporter.unix.kubernetes_monitoring_telemetry.targets + rule { + target_label = "instance" + action = "replace" + replacement = "ko" + } + rule { + 
target_label = "job" + action = "replace" + replacement = "integrations/kubernetes/kubernetes_monitoring_telemetry" + } + } + + prometheus.scrape "kubernetes_monitoring_telemetry" { + job_name = "integrations/kubernetes/kubernetes_monitoring_telemetry" + targets = discovery.relabel.kubernetes_monitoring_telemetry.output + scrape_interval = "1h" + clustering { + enabled = true + } + forward_to = [prometheus.relabel.kubernetes_monitoring_telemetry.receiver] + } + + prometheus.relabel "kubernetes_monitoring_telemetry" { + rule { + source_labels = ["__name__"] + regex = "grafana_kubernetes_monitoring_.*" + action = "keep" + } + forward_to = [ + otelcol.receiver.prometheus.otlp_gateway.receiver, + ] + } + + + + + self-reporting-metric.prom: | + # HELP grafana_kubernetes_monitoring_build_info A metric to report the version of the Kubernetes Monitoring Helm chart + # TYPE grafana_kubernetes_monitoring_build_info gauge + grafana_kubernetes_monitoring_build_info{version="2.0.0-rc.2", namespace="default"} 1 + # HELP grafana_kubernetes_monitoring_feature_info A metric to report the enabled features of the Kubernetes Monitoring Helm chart + # TYPE grafana_kubernetes_monitoring_feature_info gauge + grafana_kubernetes_monitoring_feature_info{feature="applicationObservability", protocols="otlpgrpc", version="1.0.0"} 1 + grafana_kubernetes_monitoring_feature_info{feature="autoInstrumentation", version="1.0.0"} 1 +--- +# Source: k8s-monitoring/templates/alloy-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-receiver + namespace: default +data: + config.alloy: |- + // Destination: otlp-gateway (otlp) + otelcol.receiver.prometheus "otlp_gateway" { + output { + metrics = [otelcol.processor.transform.otlp_gateway.input] + } + } + otelcol.receiver.loki "otlp_gateway" { + output { + logs = [otelcol.processor.transform.otlp_gateway.input] + } + } + + otelcol.processor.transform "otlp_gateway" { + error_mode = "ignore" + metric_statements { + context = "resource" + 
statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + log_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + trace_statements { + context = "resource" + statements = [ + "set(attributes[\"cluster\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"cluster\"] == nil", + "set(attributes[\"k8s.cluster.name\"], \"annotation-autodiscovery-with-traces-cluster\") where attributes[\"k8s.cluster.name\"] == nil", + ] + } + + output { + metrics = [otelcol.exporter.otlp.otlp_gateway.input] + logs = [otelcol.exporter.otlp.otlp_gateway.input] + traces = [otelcol.exporter.otlp.otlp_gateway.input] + } + } + otelcol.exporter.otlp "otlp_gateway" { + client { + endpoint = "http://otlp-gateway.example.com" + headers = { + } + tls { + insecure = false + insecure_skip_verify = false + } + } + } + + // Feature: Application Observability + declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output 
{ + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = 
[otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } + } + application_observability "feature" { + metrics_destinations = [ + otelcol.processor.transform.otlp_gateway.input, + ] + logs_destinations = [ + otelcol.processor.transform.otlp_gateway.input, + ] + traces_destinations = [ + otelcol.processor.transform.otlp_gateway.input, + ] + } +--- +# Source: k8s-monitoring/templates/beyla-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-beyla + namespace: default + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: config +data: + beyla-config.yml: |- + attributes: + kubernetes: + enable: true + cluster_name: annotation-autodiscovery-with-traces-cluster + select: + beyla_network_flow_bytes: + include: + - 'k8s.src.owner.type' + - 'k8s.dst.owner.type' + - 'direction' + filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + discovery: + services: + - k8s_namespace: . 
+ exclude_services: + - exe_path: ".*alloy.*|.*otelcol.*|.*beyla.*" + internal_metrics: + prometheus: + port: 9090 + path: /internal/metrics + prometheus_export: + port: 9090 + path: /metrics + features: + - application + - network + - application_service_graph + - application_span + otel_traces_export: + endpoint: http://ko-alloy-receiver.default.svc.cluster.local:4317 +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.9.2 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/cluster-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-beyla + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: rbac +rules: + - apiGroups: [ "apps" ] + resources: [ "replicasets" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "pods", "services", "nodes" ] + verbs: [ "list", "watch", "get" ] +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-metrics +subjects: + - kind: ServiceAccount + name: ko-alloy-metrics + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.9.2 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + 
+ app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-receiver +subjects: + - kind: ServiceAccount + name: ko-alloy-receiver + namespace: default +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/cluster-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-beyla + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: rbac +subjects: + - kind: ServiceAccount + name: ko-beyla + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-beyla +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics-cluster + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + clusterIP: 'None' + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + ports: + # Do not include the -metrics suffix in the port name, otherwise metrics + # can be double-collected with the non-headless Service if it's also + # enabled. + # + # This service should only be used for clustering, and not metric + # collection. 
+ - name: http + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.9.2 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.9.2 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + 
kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-receiver + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.4.3 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + - containerPort: 4317 + name: otlp-grpc + protocol: TCP + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-receiver +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/daemon-set.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-beyla + namespace: default + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: 
beyla + app.kubernetes.io/component: workload +spec: + selector: + matchLabels: + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + k8s.grafana.com/logs.job: integrations/beyla + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: workload + spec: + serviceAccountName: ko-beyla + hostPID: true + containers: + - name: beyla + image: docker.io/grafana/beyla:1.8.6 + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + ports: + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: BEYLA_CONFIG_PATH + value: "/etc/beyla/config/beyla-config.yml" + volumeMounts: + - mountPath: /etc/beyla/config + name: beyla-config + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: beyla-config + configMap: + name: ko-beyla +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + replicas: 1 + podManagementPolicy: Parallel + minReadySeconds: 10 + serviceName: ko-alloy-metrics + selector: + matchLabels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + k8s.grafana.com/logs.job: integrations/alloy + labels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-metrics + 
containers: + - name: alloy + image: docker.io/grafana/alloy:v1.4.3 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --cluster.enabled=true + - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.name="alloy-metrics" + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/values.yaml b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/values.yaml new file mode 100644 index 000000000..ed23f7358 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/values.yaml @@ -0,0 +1,32 @@ +--- +cluster: + name: annotation-autodiscovery-with-traces-cluster + +destinations: + - name: otlp-gateway 
+ type: otlp + url: http://otlp-gateway.example.com + metrics: {enabled: true} + logs: {enabled: true} + traces: {enabled: true} + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +autoInstrumentation: + enabled: true + +alloy-metrics: + enabled: true + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/README.md b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/README.md new file mode 100644 index 000000000..05a61b777 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/README.md @@ -0,0 +1,24 @@ + +# Example: features/auto-instrumentation/beyla-metrics/values.yaml + +## Values + +```yaml +--- +cluster: + name: annotation-autodiscovery-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + +autoInstrumentation: + enabled: true + +alloy-metrics: + enabled: true +``` diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy new file mode 100644 index 000000000..8937b2965 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy @@ -0,0 +1,136 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + tls_config { + insecure_skip_verify = false + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + 
batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "annotation-autodiscovery-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "annotation-autodiscovery-cluster" + target_label = "cluster" + } + } +} + +// Feature: Auto-Instrumentation +declare "auto_instrumentation" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "beyla_pods" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = "app.kubernetes.io/name=beyla" + } + } + + discovery.relabel "beyla_pods" { + targets = discovery.kubernetes.beyla_pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } + } + + prometheus.scrape "beyla_applications" { + targets = discovery.relabel.beyla_pods.output + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + + prometheus.scrape "beyla_internal" { + targets = discovery.relabel.beyla_pods.output + metrics_path = "/internal/metrics" + job_name = "integrations/beyla" + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } +} +auto_instrumentation "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] +} + +// Self Reporting +prometheus.exporter.unix "kubernetes_monitoring_telemetry" { + set_collectors = ["textfile"] + textfile { + directory = "/etc/alloy" + } +} + +discovery.relabel "kubernetes_monitoring_telemetry" { + targets = prometheus.exporter.unix.kubernetes_monitoring_telemetry.targets + rule { + 
target_label = "instance" + action = "replace" + replacement = "ko" + } + rule { + target_label = "job" + action = "replace" + replacement = "integrations/kubernetes/kubernetes_monitoring_telemetry" + } +} + +prometheus.scrape "kubernetes_monitoring_telemetry" { + job_name = "integrations/kubernetes/kubernetes_monitoring_telemetry" + targets = discovery.relabel.kubernetes_monitoring_telemetry.output + scrape_interval = "1h" + clustering { + enabled = true + } + forward_to = [prometheus.relabel.kubernetes_monitoring_telemetry.receiver] +} + +prometheus.relabel "kubernetes_monitoring_telemetry" { + rule { + source_labels = ["__name__"] + regex = "grafana_kubernetes_monitoring_.*" + action = "keep" + } + forward_to = [ + prometheus.remote_write.prometheus.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml new file mode 100644 index 000000000..3ecad40ae --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml @@ -0,0 +1,631 @@ +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-metrics + namespace: default + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-beyla + namespace: default + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: 
beyla + app.kubernetes.io/component: rbac +automountServiceAccountToken: true +--- +# Source: k8s-monitoring/templates/alloy-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-metrics + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + tls_config { + insecure_skip_verify = false + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "annotation-autodiscovery-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "annotation-autodiscovery-cluster" + target_label = "cluster" + } + } + } + + // Feature: Auto-Instrumentation + declare "auto_instrumentation" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "beyla_pods" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = "app.kubernetes.io/name=beyla" + } + } + + discovery.relabel "beyla_pods" { + targets = discovery.kubernetes.beyla_pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } + } + + prometheus.scrape "beyla_applications" { + targets = discovery.relabel.beyla_pods.output + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + 
+ prometheus.scrape "beyla_internal" { + targets = discovery.relabel.beyla_pods.output + metrics_path = "/internal/metrics" + job_name = "integrations/beyla" + honor_labels = true + scrape_interval = "60s" + clustering { + enabled = true + } + forward_to = argument.metrics_destinations.value + } + } + auto_instrumentation "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] + } + + // Self Reporting + prometheus.exporter.unix "kubernetes_monitoring_telemetry" { + set_collectors = ["textfile"] + textfile { + directory = "/etc/alloy" + } + } + + discovery.relabel "kubernetes_monitoring_telemetry" { + targets = prometheus.exporter.unix.kubernetes_monitoring_telemetry.targets + rule { + target_label = "instance" + action = "replace" + replacement = "ko" + } + rule { + target_label = "job" + action = "replace" + replacement = "integrations/kubernetes/kubernetes_monitoring_telemetry" + } + } + + prometheus.scrape "kubernetes_monitoring_telemetry" { + job_name = "integrations/kubernetes/kubernetes_monitoring_telemetry" + targets = discovery.relabel.kubernetes_monitoring_telemetry.output + scrape_interval = "1h" + clustering { + enabled = true + } + forward_to = [prometheus.relabel.kubernetes_monitoring_telemetry.receiver] + } + + prometheus.relabel "kubernetes_monitoring_telemetry" { + rule { + source_labels = ["__name__"] + regex = "grafana_kubernetes_monitoring_.*" + action = "keep" + } + forward_to = [ + prometheus.remote_write.prometheus.receiver, + ] + } + + + + + self-reporting-metric.prom: | + # HELP grafana_kubernetes_monitoring_build_info A metric to report the version of the Kubernetes Monitoring Helm chart + # TYPE grafana_kubernetes_monitoring_build_info gauge + grafana_kubernetes_monitoring_build_info{version="2.0.0-rc.2", namespace="default"} 1 + # HELP grafana_kubernetes_monitoring_feature_info A metric to report the enabled features of the Kubernetes Monitoring Helm chart + # TYPE 
grafana_kubernetes_monitoring_feature_info gauge + grafana_kubernetes_monitoring_feature_info{feature="autoInstrumentation", version="1.0.0"} 1 +--- +# Source: k8s-monitoring/templates/beyla-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-beyla + namespace: default + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: config +data: + beyla-config.yml: |- + attributes: + kubernetes: + enable: true + cluster_name: annotation-autodiscovery-cluster + select: + beyla_network_flow_bytes: + include: + - 'k8s.src.owner.type' + - 'k8s.dst.owner.type' + - 'direction' + filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + discovery: + services: + - k8s_namespace: . + exclude_services: + - exe_path: ".*alloy.*|.*otelcol.*|.*beyla.*" + internal_metrics: + prometheus: + port: 9090 + path: /internal/metrics + prometheus_export: + port: 9090 + path: /metrics + features: + - application + - network + - application_service_graph + - application_span +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. 
+ - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/cluster-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-beyla + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: rbac +rules: + - apiGroups: [ "apps" ] + resources: [ "replicasets" ] + verbs: [ "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "pods", "services", "nodes" ] 
+ verbs: [ "list", "watch", "get" ] +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-metrics +subjects: + - kind: ServiceAccount + name: ko-alloy-metrics + namespace: default +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/cluster-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-beyla + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: rbac +subjects: + - kind: ServiceAccount + name: ko-beyla + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-beyla +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics-cluster + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + clusterIP: 'None' + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + ports: + # Do not include the -metrics suffix in the port name, otherwise metrics + # can be double-collected with the non-headless 
Service if it's also + # enabled. + # + # This service should only be used for clustering, and not metric + # collection. + - name: http + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/daemon-set.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-beyla + namespace: default + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: workload +spec: + selector: + matchLabels: + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + k8s.grafana.com/logs.job: integrations/beyla + labels: + helm.sh/chart: beyla-1.4.11 + app.kubernetes.io/name: beyla + app.kubernetes.io/instance: ko + app.kubernetes.io/version: "1.8.6" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: beyla + app.kubernetes.io/component: workload + spec: + serviceAccountName: ko-beyla + hostPID: true + containers: + - name: beyla + image: docker.io/grafana/beyla:1.8.6 + imagePullPolicy: IfNotPresent + 
securityContext: + privileged: true + ports: + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: BEYLA_CONFIG_PATH + value: "/etc/beyla/config/beyla-config.yml" + volumeMounts: + - mountPath: /etc/beyla/config + name: beyla-config + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: beyla-config + configMap: + name: ko-beyla +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.9.2 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + replicas: 1 + podManagementPolicy: Parallel + minReadySeconds: 10 + serviceName: ko-alloy-metrics + selector: + matchLabels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + k8s.grafana.com/logs.job: integrations/alloy + labels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-metrics + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.4.3 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --cluster.enabled=true + - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.name="alloy-metrics" + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + 
capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/values.yaml b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/values.yaml new file mode 100644 index 000000000..b887ce574 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/values.yaml @@ -0,0 +1,14 @@ +--- +cluster: + name: annotation-autodiscovery-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + +autoInstrumentation: + enabled: true + +alloy-metrics: + enabled: true diff --git a/charts/k8s-monitoring/templates/beyla-config.yaml b/charts/k8s-monitoring/templates/beyla-config.yaml new file mode 100644 index 000000000..1912350e0 --- /dev/null +++ b/charts/k8s-monitoring/templates/beyla-config.yaml @@ -0,0 +1,66 @@ +{{- if and .Values.autoInstrumentation.enabled }} +{{- $grpcReceiverEndpoint := include "features.applicationObservability.receiver.grpc" . | trim }} +{{- $httpReceiverEndpoint := include "features.applicationObservability.receiver.http" . 
| trim }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "beyla.fullname" .Subcharts.autoInstrumentation.Subcharts.beyla }} + namespace: {{ include "beyla.namespace" .Subcharts.autoInstrumentation.Subcharts.beyla }} + labels: + {{- include "beyla.labels" .Subcharts.autoInstrumentation.Subcharts.beyla | nindent 4 }} + app.kubernetes.io/component: config + {{- with .Values.autoInstrumentation.beyla.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + beyla-config.yml: |- + attributes: + kubernetes: + enable: true + cluster_name: {{ .Values.cluster.name }} + select: + beyla_network_flow_bytes: + include: + - 'k8s.src.owner.type' + - 'k8s.dst.owner.type' + - 'direction' + filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' +{{- if eq .Values.autoInstrumentation.beyla.preset "network" }} + {{- if not .Values.autoInstrumentation.beyla.config.data.network }} + network: + enable: true + {{- end }} +{{- end }} +{{- if eq .Values.autoInstrumentation.beyla.preset "application" }} + {{- if not .Values.autoInstrumentation.beyla.config.data.discovery }} + discovery: + services: + - k8s_namespace: . 
+ exclude_services: + - exe_path: ".*alloy.*|.*otelcol.*|.*beyla.*" + {{- end }} +{{- end }} + internal_metrics: + prometheus: + port: {{ .Values.autoInstrumentation.beyla.service.targetPort }} + path: /internal/metrics + prometheus_export: + port: {{ .Values.autoInstrumentation.beyla.service.targetPort }} + path: /metrics + features: + - application + - network + - application_service_graph + - application_span +{{- if or $grpcReceiverEndpoint $httpReceiverEndpoint }} + otel_traces_export: + endpoint: {{ $grpcReceiverEndpoint | default $httpReceiverEndpoint }} +{{- end }} +{{- end }} diff --git a/charts/k8s-monitoring/templates/collectors/_collector_validations.tpl b/charts/k8s-monitoring/templates/collectors/_collector_validations.tpl index e333b1335..737e72df6 100644 --- a/charts/k8s-monitoring/templates/collectors/_collector_validations.tpl +++ b/charts/k8s-monitoring/templates/collectors/_collector_validations.tpl @@ -4,56 +4,17 @@ {{- $msg = append $msg " enabled: false" }} {{- $errorMessage := join "\n" $msg }} -{{- $collectorName := "alloy-metrics" }} -{{- if (index .Values $collectorName).enabled }} - {{- $atLeastOneFeatureEnabled := or .Values.clusterMetrics.enabled .Values.annotationAutodiscovery.enabled .Values.prometheusOperatorObjects.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).remoteConfig.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).extraConfig }} - {{- $integrationsConfigured := include "feature.integrations.configured.metrics" .Subcharts.integrations | fromYamlArray }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (not (empty $integrationsConfigured)) }} - - {{- if not $atLeastOneFeatureEnabled }} - {{- fail (printf $errorMessage $collectorName $collectorName) }} - {{- end }} -{{- end }} - -{{- $collectorName = "alloy-singleton" }} -{{- if (index .Values $collectorName).enabled }} - {{- $atLeastOneFeatureEnabled 
:= .Values.clusterEvents.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).remoteConfig.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).extraConfig }} - {{- if not $atLeastOneFeatureEnabled }} - {{- fail (printf $errorMessage $collectorName $collectorName) }} - {{- end }} -{{- end }} - -{{- $collectorName = "alloy-logs" }} -{{- if (index .Values $collectorName).enabled }} - {{- $atLeastOneFeatureEnabled := .Values.podLogs.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).remoteConfig.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).extraConfig }} - {{- if not $atLeastOneFeatureEnabled }} - {{- fail (printf $errorMessage $collectorName $collectorName) }} - {{- end }} -{{- end }} - -{{- $collectorName = "alloy-receiver" }} -{{- if (index .Values $collectorName).enabled }} - {{- $atLeastOneFeatureEnabled := or .Values.applicationObservability.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).remoteConfig.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).extraConfig }} - {{- if not $atLeastOneFeatureEnabled }} - {{- fail (printf $errorMessage $collectorName $collectorName) }} - {{- end }} +{{- $collectorsUtilized := list }} +{{- range $feature := include "features.list.enabled" . 
| fromYamlArray }} + {{- $collectorsUtilized = concat $collectorsUtilized (include (printf "features.%s.collectors" $feature) $ | fromYamlArray) }} {{- end }} -{{- $collectorName = "alloy-profiles" }} -{{- if (index .Values $collectorName).enabled }} - {{- $atLeastOneFeatureEnabled := .Values.profiling.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).remoteConfig.enabled }} - {{- $atLeastOneFeatureEnabled = or $atLeastOneFeatureEnabled (index .Values $collectorName).extraConfig }} - {{- if not $atLeastOneFeatureEnabled }} - {{- fail (printf $errorMessage $collectorName $collectorName) }} +{{- range $collector := include "collectors.list.enabled" . | fromYamlArray }} + {{- $usedByAFeature := has $collector $collectorsUtilized }} + {{- $extraConfigDefined := not (not (index $.Values $collector).extraConfig) }} + {{- $remoteConfigEnabled := (index $.Values $collector).remoteConfig.enabled }} + {{- if not (or $usedByAFeature $extraConfigDefined $remoteConfigEnabled) }} + {{- fail (printf $errorMessage $collector $collector) }} {{- end }} {{- end }} {{- end }} diff --git a/charts/k8s-monitoring/templates/features/_feature_annotation_autodiscovery.tpl b/charts/k8s-monitoring/templates/features/_feature_annotation_autodiscovery.tpl index e0d428dbc..b10d49546 100644 --- a/charts/k8s-monitoring/templates/features/_feature_annotation_autodiscovery.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_annotation_autodiscovery.tpl @@ -30,6 +30,8 @@ annotation_autodiscovery "feature" { {{- $featureName := "Annotation Autodiscovery" }} {{- $destinations := include "features.annotationAutodiscovery.destinations" . 
| fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "metrics" "ecosystem" "prometheus" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-metrics" "feature" $featureName) }} +{{- range $collector := include "features.annotationAutodiscovery.collectors" . | fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl b/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl index 198f47e17..2782b1618 100644 --- a/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_application_observability.tpl @@ -47,16 +47,29 @@ application_observability "feature" { {{- $traceDestinations := include "destinations.get" (dict "destinations" $.Values.destinations "type" "traces" "ecosystem" "otlp" "filter" $.Values.applicationObservability.destinations) | fromYamlArray -}} {{- include "destinations.validate_destination_list" (dict "destinations" $traceDestinations "type" "traces" "ecosystem" "otlp" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-receiver" "feature" $featureName) }} -{{- if $.Values.applicationObservability.receivers.grpc.enabled }} - {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" "alloy-receiver" "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.grpc.port "portName" "otlp-grpc" "portProtocol" "TCP") }} +{{- range $collector := include "features.applicationObservability.collectors" . 
| fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} + {{- if $.Values.applicationObservability.receivers.grpc.enabled }} + {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.grpc.port "portName" "otlp-grpc" "portProtocol" "TCP") }} + {{- end -}} + {{- if $.Values.applicationObservability.receivers.http.enabled }} + {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.http.port "portName" "otlp-http" "portProtocol" "TCP") }} + {{- end -}} + {{- if $.Values.applicationObservability.receivers.zipkin.enabled }} + {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" $collector "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.zipkin.port "portName" "zipkin" "portProtocol" "TCP") }} + {{- end -}} + {{- include "feature.applicationObservability.validate" (dict "Values" $.Values.applicationObservability) }} {{- end -}} -{{- if $.Values.applicationObservability.receivers.http.enabled }} - {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" "alloy-receiver" "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.http.port "portName" "otlp-http" "portProtocol" "TCP") }} -{{- end -}} -{{- if $.Values.applicationObservability.receivers.zipkin.enabled }} - {{- include "collectors.require_extra_port" (dict "Values" $.Values "name" "alloy-receiver" "feature" $featureName "portNumber" $.Values.applicationObservability.receivers.zipkin.port "portName" "zipkin" "portProtocol" "TCP") }} -{{- end -}} -{{- include "feature.applicationObservability.validate" (dict "Values" $.Values.applicationObservability) }} {{- end -}} {{- end -}} + +{{- define 
"features.applicationObservability.receiver.grpc" }} + {{- if and .Values.applicationObservability.enabled .Values.applicationObservability.receivers.grpc.enabled }} +http://{{ include "alloy.fullname" (index .Subcharts .Values.applicationObservability.collector) }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.applicationObservability.receivers.grpc.port }} + {{- end }} +{{- end }} +{{- define "features.applicationObservability.receiver.http" }} + {{- if and .Values.applicationObservability.enabled .Values.applicationObservability.receivers.http.enabled }} +http://{{ include "alloy.fullname" (index .Subcharts .Values.applicationObservability.collector) }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.applicationObservability.receivers.http.port }} + {{- end }} +{{- end }} diff --git a/charts/k8s-monitoring/templates/features/_feature_auto_instrumentation.tpl b/charts/k8s-monitoring/templates/features/_feature_auto_instrumentation.tpl new file mode 100644 index 000000000..c7855fdd7 --- /dev/null +++ b/charts/k8s-monitoring/templates/features/_feature_auto_instrumentation.tpl @@ -0,0 +1,37 @@ +{{- define "features.autoInstrumentation.enabled" }}{{ .Values.autoInstrumentation.enabled }}{{- end }} + +{{- define "features.autoInstrumentation.collectors" }} +{{- if .Values.autoInstrumentation.enabled -}} +- {{ .Values.autoInstrumentation.collector }} +{{- end }} +{{- end }} + +{{- define "features.autoInstrumentation.include" }} +{{- if .Values.autoInstrumentation.enabled -}} +{{- $destinations := include "features.autoInstrumentation.destinations" . 
| fromYamlArray }} +// Feature: Auto-Instrumentation +{{- include "feature.autoInstrumentation.module" (dict "Values" $.Values.autoInstrumentation "Files" $.Subcharts.autoInstrumentation.Files) }} +auto_instrumentation "feature" { + metrics_destinations = [ + {{ include "destinations.alloy.targets" (dict "destinations" $.Values.destinations "names" $destinations "type" "metrics" "ecosystem" "prometheus") | indent 4 | trim }} + ] +} +{{- end -}} +{{- end -}} + +{{- define "features.autoInstrumentation.destinations" }} +{{- if .Values.autoInstrumentation.enabled -}} +{{- include "destinations.get" (dict "destinations" $.Values.destinations "type" "metrics" "ecosystem" "prometheus" "filter" $.Values.autoInstrumentation.destinations) -}} +{{- end -}} +{{- end -}} + +{{- define "features.autoInstrumentation.validate" }} +{{- if .Values.autoInstrumentation.enabled -}} +{{- $featureName := "Auto-Instrumentation" }} +{{- $destinations := include "features.autoInstrumentation.destinations" . | fromYamlArray }} +{{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "metrics" "ecosystem" "prometheus" "feature" $featureName) }} +{{- range $collector := include "features.autoInstrumentation.collectors" . | fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_cluster_events.tpl b/charts/k8s-monitoring/templates/features/_feature_cluster_events.tpl index 0758348ef..9a4f09b23 100644 --- a/charts/k8s-monitoring/templates/features/_feature_cluster_events.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_cluster_events.tpl @@ -30,6 +30,8 @@ cluster_events "feature" { {{- $featureName := "Kubernetes Cluster events" }} {{- $destinations := include "features.clusterEvents.destinations" . 
| fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "logs" "ecosystem" "loki" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-singleton" "feature" $featureName) }} +{{- range $collector := include "features.clusterEvents.collectors" . | fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_cluster_metrics.tpl b/charts/k8s-monitoring/templates/features/_feature_cluster_metrics.tpl index d605f8fdd..21e165e8d 100644 --- a/charts/k8s-monitoring/templates/features/_feature_cluster_metrics.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_cluster_metrics.tpl @@ -30,6 +30,8 @@ cluster_metrics "feature" { {{- $featureName := "Kubernetes Cluster metrics" }} {{- $destinations := include "features.clusterMetrics.destinations" . | fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "metrics" "ecosystem" "prometheus" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-metrics" "feature" $featureName) }} +{{- range $collector := include "features.clusterMetrics.collectors" . 
| fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_helpers.tpl b/charts/k8s-monitoring/templates/features/_feature_helpers.tpl index 0e49df667..d8f0e7c2f 100644 --- a/charts/k8s-monitoring/templates/features/_feature_helpers.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_helpers.tpl @@ -1,6 +1,7 @@ {{- define "features.list" }} - annotationAutodiscovery - applicationObservability +- autoInstrumentation - clusterMetrics - clusterEvents - podLogs diff --git a/charts/k8s-monitoring/templates/features/_feature_integrations.tpl b/charts/k8s-monitoring/templates/features/_feature_integrations.tpl index ae8a3d524..9fa0c99f8 100644 --- a/charts/k8s-monitoring/templates/features/_feature_integrations.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_integrations.tpl @@ -63,7 +63,6 @@ {{- if $metricIntegrations }} {{- $metricDestinations := include "features.integrations.destinations" . 
| fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $metricDestinations "type" "metrics" "ecosystem" "prometheus" "feature" $featureName) }} - {{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-metrics" "feature" $featureName) }} {{- end }} {{- $podLogsEnabled := include "features.podLogs.enabled" $ }} diff --git a/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl b/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl index 66c762f40..cdef7193e 100644 --- a/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl @@ -36,6 +36,8 @@ pod_logs "feature" { {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "logs" "ecosystem" "loki" "feature" $featureName) }} {{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-logs" "feature" $featureName) }} -{{- include "feature.podLogs.collector.validate" (dict "Values" $.Values.podLogs "Collector" (index .Values "alloy-logs") "CollectorName" "alloy-logs") }} +{{- range $collector := include "features.podLogs.collectors" . | fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_profiling.tpl b/charts/k8s-monitoring/templates/features/_feature_profiling.tpl index f16917647..067143418 100644 --- a/charts/k8s-monitoring/templates/features/_feature_profiling.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_profiling.tpl @@ -30,6 +30,8 @@ profiling "feature" { {{- $featureName := "Profiling" }} {{- $destinations := include "features.profiling.destinations" . 
| fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "profiles" "ecosystem" "pyroscope" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-profiles" "feature" $featureName) }} +{{- range $collector := include "features.profiling.collectors" . | fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_prometheus_operator_obejcts.tpl b/charts/k8s-monitoring/templates/features/_feature_prometheus_operator_obejcts.tpl index e1ca57e40..85d6a10d8 100644 --- a/charts/k8s-monitoring/templates/features/_feature_prometheus_operator_obejcts.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_prometheus_operator_obejcts.tpl @@ -30,7 +30,9 @@ prometheus_operator_objects "feature" { {{- $featureName := "Prometheus Operator Objects" }} {{- $destinations := include "features.prometheusOperatorObjects.destinations" . | fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "metrics" "ecosystem" "prometheus" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-metrics" "feature" $featureName) }} +{{- range $collector := include "features.prometheusOperatorObjects.collectors" . 
| fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} +{{- end -}} {{- include "feature.prometheusOperatorObjects.validate" (dict "Values" $.Values.prometheusOperatorObjects) }} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/tests/beyla_config_test.yaml b/charts/k8s-monitoring/tests/beyla_config_test.yaml new file mode 100644 index 000000000..b48b1a3f6 --- /dev/null +++ b/charts/k8s-monitoring/tests/beyla_config_test.yaml @@ -0,0 +1,108 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test Beyla config values +templates: + - beyla-config.yaml +tests: + - it: creates a ConfigMap for Beyla + set: + cluster: {name: beyla-config-cluster} + autoInstrumentation: {enabled: true} + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["beyla-config.yml"] + value: |- + attributes: + kubernetes: + enable: true + cluster_name: beyla-config-cluster + select: + beyla_network_flow_bytes: + include: + - 'k8s.src.owner.type' + - 'k8s.dst.owner.type' + - 'direction' + filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + discovery: + services: + - k8s_namespace: . 
+ exclude_services: + - exe_path: ".*alloy.*|.*otelcol.*|.*beyla.*" + internal_metrics: + prometheus: + port: 9090 + path: /internal/metrics + prometheus_export: + port: 9090 + path: /metrics + features: + - application + - network + - application_service_graph + - application_span + + - it: sets the otel_traces_export endpoint if applicationObservability is enabled + set: + deployAsConfigMap: true + cluster: {name: beyla-config-cluster} + autoInstrumentation: {enabled: true} + applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["beyla-config.yml"] + value: |- + attributes: + kubernetes: + enable: true + cluster_name: beyla-config-cluster + select: + beyla_network_flow_bytes: + include: + - 'k8s.src.owner.type' + - 'k8s.dst.owner.type' + - 'direction' + filter: + network: + k8s_dst_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + k8s_src_owner_name: + not_match: '{kube*,*jaeger-agent*,*prometheus*,*promtail*,*grafana-agent*}' + discovery: + services: + - k8s_namespace: . 
+ exclude_services: + - exe_path: ".*alloy.*|.*otelcol.*|.*beyla.*" + internal_metrics: + prometheus: + port: 9090 + path: /internal/metrics + prometheus_export: + port: 9090 + path: /metrics + features: + - application + - network + - application_service_graph + - application_span + otel_traces_export: + endpoint: http://RELEASE-NAME-alloy-receiver.NAMESPACE.svc.cluster.local:4317 diff --git a/charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/grafana.yaml b/charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/grafana.yaml new file mode 100644 index 000000000..0c9fb4c98 --- /dev/null +++ b/charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/grafana.yaml @@ -0,0 +1,9 @@ +--- +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Prometheus + type: prometheus + url: http://prometheus-server.prometheus.svc:9090 + isDefault: true diff --git a/charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/prometheus.yaml b/charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/prometheus.yaml new file mode 100644 index 000000000..23e225d1b --- /dev/null +++ b/charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/prometheus.yaml @@ -0,0 +1,30 @@ +--- +server: + extraFlags: + - enable-feature=remote-write-receiver + + persistentVolume: + enabled: false + + service: + servicePort: 9090 + +serverFiles: + prometheus.yml: + scrape_configs: [] + +configmapReload: + prometheus: + enabled: false + +alertmanager: + enabled: false + +kube-state-metrics: + enabled: false + +prometheus-node-exporter: + enabled: false + +prometheus-pushgateway: + enabled: false diff --git a/charts/k8s-monitoring/tests/integration/auto-instrumentation/test-manifest.yaml b/charts/k8s-monitoring/tests/integration/auto-instrumentation/test-manifest.yaml new file mode 100644 index 000000000..e9aa866b5 --- /dev/null +++ b/charts/k8s-monitoring/tests/integration/auto-instrumentation/test-manifest.yaml 
@@ -0,0 +1,15 @@ +--- +prerequisites: + - type: helm + name: prometheus + repo: https://prometheus-community.github.io/helm-charts + chart: prometheus + namespace: prometheus + valuesFile: charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/prometheus.yaml + + - type: helm + name: grafana + repo: https://grafana.github.io/helm-charts + chart: grafana + namespace: grafana + valuesFile: charts/k8s-monitoring/tests/integration/auto-instrumentation/configs/grafana.yaml diff --git a/charts/k8s-monitoring/tests/integration/auto-instrumentation/test-values.yaml b/charts/k8s-monitoring/tests/integration/auto-instrumentation/test-values.yaml new file mode 100644 index 000000000..5ff8aff7a --- /dev/null +++ b/charts/k8s-monitoring/tests/integration/auto-instrumentation/test-values.yaml @@ -0,0 +1,10 @@ +--- +tests: + - env: + PROMETHEUS_URL: http://prometheus-server.prometheus.svc:9090/api/v1/query + queries: + # Self reporting metrics + - query: grafana_kubernetes_monitoring_build_info{cluster="auto-instrumentation-integration-test"} + type: promql + - query: grafana_kubernetes_monitoring_feature_info{cluster="auto-instrumentation-integration-test", feature="autoInstrumentation"} + type: promql diff --git a/charts/k8s-monitoring/tests/integration/auto-instrumentation/values.yaml b/charts/k8s-monitoring/tests/integration/auto-instrumentation/values.yaml new file mode 100644 index 000000000..7084e27fc --- /dev/null +++ b/charts/k8s-monitoring/tests/integration/auto-instrumentation/values.yaml @@ -0,0 +1,16 @@ +--- +cluster: + name: auto-instrumentation-integration-test + +destinations: + - name: localPrometheus + type: prometheus + url: http://prometheus-server.prometheus.svc:9090/api/v1/write + +autoInstrumentation: + enabled: true + +selfReporting: {scrapeInterval: 1m} # Force self-report to be generated within test time + +alloy-metrics: + enabled: true diff --git a/charts/k8s-monitoring/values.schema.json b/charts/k8s-monitoring/values.schema.json index 
b3430eed8..20202e52f 100644 --- a/charts/k8s-monitoring/values.schema.json +++ b/charts/k8s-monitoring/values.schema.json @@ -865,6 +865,20 @@ } } }, + "autoInstrumentation": { + "type": "object", + "properties": { + "collector": { + "type": "string" + }, + "destinations": { + "type": "array" + }, + "enabled": { + "type": "boolean" + } + } + }, "cluster": { "type": "object", "properties": { diff --git a/charts/k8s-monitoring/values.yaml b/charts/k8s-monitoring/values.yaml index 2e215f520..eb83b42dc 100644 --- a/charts/k8s-monitoring/values.yaml +++ b/charts/k8s-monitoring/values.yaml @@ -116,6 +116,25 @@ applicationObservability: # @ignored collector: alloy-receiver +# -- Auto-Instrumentation. +# Requires a destination that supports metrics. +# To see the valid options, please see the [Auto-Instrumentation feature documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-auto-instrumentation). +# @default -- Disabled +# @section -- Features - Auto-Instrumentation +autoInstrumentation: + # -- Enable automatic instrumentation of applications with Grafana Beyla. + # @section -- Features - Auto-Instrumentation + enabled: false + + # -- The destinations where metrics will be sent. If empty, all capable destinations will be used. + # @section -- Features - Auto-Instrumentation + destinations: [] + + # -- Which collector to assign this feature to. Do not change this unless you are sure of what you are doing. + # @section -- Features - Auto-Instrumentation + # @ignored + collector: alloy-metrics + # -- Annotation Autodiscovery enables gathering metrics from Kubernetes Pods and Services discovered by special annotations. # Requires a destination that supports metrics. # To see the valid options, please see the [Annotation Autodiscovery feature documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-annotation-autodiscovery). 
diff --git a/scripts/run-integration-test.sh b/scripts/run-integration-test.sh index 59bb8f11d..39bb95f66 100755 --- a/scripts/run-integration-test.sh +++ b/scripts/run-integration-test.sh @@ -66,13 +66,13 @@ if ! kind get clusters | grep -q "${clusterName}"; then fi fi -echo "Deploying prerequisites..." prerequisiteCount=$(yq -r '.prerequisites | length' "${testManifest}") for ((i=0; i