diff --git a/charts/feature-annotation-autodiscovery/README.md b/charts/feature-annotation-autodiscovery/README.md index 8642d7234..9b00f5a66 100644 --- a/charts/feature-annotation-autodiscovery/README.md +++ b/charts/feature-annotation-autodiscovery/README.md @@ -8,7 +8,7 @@ ![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) Gathers metrics automatically based on Kubernetes Pod and Service annotations -The annotation-based autodiscovery feature makes it very simple to add scrape targets. With this feature enabled, any +The annotation-based autodiscovery feature makes it easy to add scrape targets. With this feature enabled, any Kubernetes Pods or Services with the `k8s.grafana.com/scrape` annotation set to `true` will be automatically discovered and scraped by the collector. There are several other annotations that can be used to customize the behavior of the scrape configuration, such as: diff --git a/charts/feature-annotation-autodiscovery/README.md.gotmpl b/charts/feature-annotation-autodiscovery/README.md.gotmpl index 9a3635076..78ecdaf5a 100644 --- a/charts/feature-annotation-autodiscovery/README.md.gotmpl +++ b/charts/feature-annotation-autodiscovery/README.md.gotmpl @@ -9,7 +9,7 @@ {{ template "chart.description" . }} {{ template "chart.homepageLine" . }} -The annotation-based autodiscovery feature makes it very simple to add scrape targets. With this feature enabled, any +The annotation-based autodiscovery feature makes it easy to add scrape targets. With this feature enabled, any Kubernetes Pods or Services with the `k8s.grafana.com/scrape` annotation set to `true` will be automatically discovered and scraped by the collector. 
There are several other annotations that can be used to customize the behavior of the scrape configuration, such as: diff --git a/charts/feature-pod-logs/templates/_collector_validation.tpl b/charts/feature-pod-logs/templates/_collector_validation.tpl index 60f2900dd..34ecabb39 100644 --- a/charts/feature-pod-logs/templates/_collector_validation.tpl +++ b/charts/feature-pod-logs/templates/_collector_validation.tpl @@ -6,14 +6,14 @@ {{- fail (printf "Pod Logs feature requires Alloy to be a DaemonSet when using the \"volumes\" gather method.\nPlease set:\n%s:\n controller:\n type: daemonset" .CollectorName) }} {{- end -}} {{- if not .Collector.alloy.mounts.varlog }} - {{- fail (printf "Pod Logs feature requires Alloy to mount /var/log when using the \"volumes\" gather method.\nPlease set:\n%s:\n controller:\n mounts:\n varlog: true" .CollectorName) }} + {{- fail (printf "Pod Logs feature requires Alloy to mount /var/log when using the \"volumes\" gather method.\nPlease set:\n%s:\n alloy:\n mounts:\n varlog: true" .CollectorName) }} {{- end -}} {{- if .Collector.alloy.clustering.enabled }} {{- fail (printf "Pod Logs feature requires Alloy to not be in clustering mode when using the \"volumes\" gather method.\nPlease set:\n%s:\n alloy:\n clustering:\n enabled: true" .CollectorName) }} {{- end -}} {{- else if eq .Values.gatherMethod "kubernetesApi" }} - {{- if not .Collector.alloy.mounts.varlog }} - {{- fail (printf "Pod Logs feature should not mount /var/log when using the \"kubernetesApi\" gather method.\nPlease set:\n%s:\n controller:\n mounts:\n varlog: false" .CollectorName) }} + {{- if .Collector.alloy.mounts.varlog }} + {{- fail (printf "Pod Logs feature should not mount /var/log when using the \"kubernetesApi\" gather method.\nPlease set:\n%s:\n alloy:\n mounts:\n varlog: false" .CollectorName) }} {{- end -}} {{- if not .Collector.alloy.clustering.enabled }} {{- fail (printf "Pod Logs feature requires Alloy to be in clustering mode when using the \"kubernetesApi\" gather method.\nPlease set:\n%s:\n alloy:\n clustering:\n enabled: true" .CollectorName) }} diff --git a/charts/k8s-monitoring/Chart.lock b/charts/k8s-monitoring/Chart.lock index 21d768bcd..5cadbfcb1 100644 --- a/charts/k8s-monitoring/Chart.lock +++ b/charts/k8s-monitoring/Chart.lock @@ -42,4 +42,4 @@ dependencies: repository: https://grafana.github.io/helm-charts version: 0.7.0 digest: sha256:a7478342074296ebf188fa7f1f61da0acebcf11cbe7293a9c5b933d834bcc30e -generated: "2024-10-04T16:06:31.559013-05:00" +generated: "2024-10-07T15:10:21.807216-05:00" diff --git a/charts/k8s-monitoring/Makefile b/charts/k8s-monitoring/Makefile index bbc5dd897..d6edaeb7e 100644 --- a/charts/k8s-monitoring/Makefile +++ b/charts/k8s-monitoring/Makefile @@ -1,5 +1,6 @@ HAS_HELM_DOCS := $(shell command -v helm-docs;) HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) +HAS_SHELLSPEC := $(shell command -v shellspec;) CHART_TEMPLATE_FILES = $(shell find templates -name "*.tpl") CHART_YAML_FILES = $(shell find templates -name "*.yaml") @@ -115,6 +116,14 @@ Chart.lock: Chart.yaml .PHONY: examples examples: $(EXAMPLE_OUTPUT_FILES) $(EXAMPLE_ALLOY_FILES) $(EXAMPLE_README_FILES) +.PHONY: example-checks +example-checks: $(EXAMPLE_OUTPUT_FILES) +ifdef HAS_SHELLSPEC + shellspec -c tests/example-checks +else + docker run --platform linux/amd64 --rm --volume $(shell pwd):/src shellspec/shellspec -c /src/tests/example-checks -s /bin/sh +endif + .PHONY: clean clean: rm -f README.md values.schema.json 
templates/destinations/_destination_types.tpl schema-mods/destination-list.json $(DESTINATION_SCHEMA_FILES) $(DESTINATION_DOCS_FILES) @@ -126,7 +135,7 @@ build: README.md examples values.schema.json templates/destinations/_destination # Test targets .PHONY: test unittest lint-helm lint-configs -test: unittest lint-helm lint-configs +test: unittest lint-helm lint-configs example-checks lint-configs: $(EXAMPLE_ALLOY_FILES) ../../scripts/lint-alloy.sh $(EXAMPLE_ALLOY_FILES) diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz index 80a8b6ed6..656ec5c4e 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz index d437736b5..7a8af015a 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz index 7c5a08ef3..130f06c3b 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz index 848f64d89..1bdfabc46 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-frontend-observability-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-frontend-observability-1.0.0.tgz index fde9b7f36..9c8bcefd1 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-frontend-observability-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-frontend-observability-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz index b3f0c743a..278408ab1 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz index 47b04349b..dd9473716 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz index 0bc2e3027..ecf10f548 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz and 
b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz index ae6e66e71..f1fe6fdaf 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/destinations/loki-values.yaml b/charts/k8s-monitoring/destinations/loki-values.yaml index f1091fc95..ae5531c70 100644 --- a/charts/k8s-monitoring/destinations/loki-values.yaml +++ b/charts/k8s-monitoring/destinations/loki-values.yaml @@ -33,11 +33,11 @@ extraHeadersFrom: {} # -- Custom labels to be added to all logs and events. # All values are treated as strings and automatically quoted. # @section -- General -externalLabels: {} +extraLabels: {} # -- Custom labels to be added to all logs and events through a dynamic reference. # All values are treated as raw strings and not quoted. # @section -- General -externalLabelsFrom: {} +extraLabelsFrom: {} auth: # -- The type of authentication to do. @@ -77,9 +77,10 @@ auth: bearerTokenFrom: "" secret: - # -- Whether to create a secret for this Loki destination. + # -- (bool) Whether to create a secret for this Loki destination. + # @default -- `true` # @section -- Secret - create: true + create: # -- If true, skip secret creation and embed the credentials directly into the configuration. # @section -- Secret embed: false diff --git a/charts/k8s-monitoring/destinations/otlp-values.yaml b/charts/k8s-monitoring/destinations/otlp-values.yaml index cde15b716..c347bb731 100644 --- a/charts/k8s-monitoring/destinations/otlp-values.yaml +++ b/charts/k8s-monitoring/destinations/otlp-values.yaml @@ -27,10 +27,6 @@ logs: # @section -- General url: "" -# -- The Proxy URL for the OTLP destination. -# @section -- General -proxyURL: "" - # -- The tenant ID for the OTLP destination. # @section -- General tenantId: "" @@ -88,9 +84,10 @@ auth: bearerTokenFrom: "" secret: - # -- Whether to create a secret for this Prometheus destination. + # -- (bool) Whether to create a secret for this Prometheus destination. + # @default -- `true` # @section -- Secret - create: true + create: # -- If true, skip secret creation and embed the credentials directly into the configuration. # @section -- Secret embed: false diff --git a/charts/k8s-monitoring/destinations/prometheus-values.yaml b/charts/k8s-monitoring/destinations/prometheus-values.yaml index f67a4815e..a74b756b5 100644 --- a/charts/k8s-monitoring/destinations/prometheus-values.yaml +++ b/charts/k8s-monitoring/destinations/prometheus-values.yaml @@ -50,7 +50,7 @@ metricProcessingRules: "" auth: # -- The type of authentication to do. - # Options are "none" (default), "basic", "bearerToken". + # Options are "none" (default), "basic", "bearerToken", "sigv4". # @default -- none # @section -- Authentication type: "none" @@ -85,10 +85,45 @@ auth: # @section -- Authentication - Bearer Token bearerTokenFrom: "" + # Authentication using AWS Signature Version 4 + sigv4: + # -- The access key for sigv4 authentication. + # @section -- Authentication - SigV4 + accessKey: "" + # -- The key for storing the access key in the secret. + # @section -- Authentication - SigV4 + accessKeyKey: "accessKey" + # -- Raw config for accessing the access key. 
+ # @section -- Authentication - SigV4 + accessKeyFrom: "" + + # -- The secret key for sigv4 authentication. + # @section -- Authentication - SigV4 + secretKey: "" + # -- The key for storing the secret key in the secret. + # @section -- Authentication - SigV4 + secretKeyKey: "secretKey" + # -- Raw config for accessing the secret key. + # @section -- Authentication - SigV4 + secretKeyFrom: "" + + # -- The named AWS profile for sigv4 authentication. + # @section -- Authentication - SigV4 + profile: "" + + # -- The AWS region for sigv4 authentication. + # @section -- Authentication - SigV4 + region: "" + + # -- The Role ARN for sigv4 authentication. + # @section -- Authentication - SigV4 + roleArn: "" + secret: - # -- Whether to create a secret for this Prometheus destination. + # -- (bool) Whether to create a secret for this Prometheus destination. + # @default -- `true` # @section -- Secret - create: true + create: # -- If true, skip secret creation and embed the credentials directly into the configuration. # @section -- Secret embed: false diff --git a/charts/k8s-monitoring/destinations/pyroscope-values.yaml b/charts/k8s-monitoring/destinations/pyroscope-values.yaml index 15ec04301..e7bd8cfcf 100644 --- a/charts/k8s-monitoring/destinations/pyroscope-values.yaml +++ b/charts/k8s-monitoring/destinations/pyroscope-values.yaml @@ -68,9 +68,10 @@ auth: bearerTokenFrom: "" secret: - # -- Whether to create a secret for this Pyroscope destination. + # -- (bool) Whether to create a secret for this Pyroscope destination. + # @default -- `true` # @section -- Secret - create: true + create: # -- If true, skip secret creation and embed the credentials directly into the configuration. # @section -- Secret embed: false diff --git a/charts/k8s-monitoring/docs/destinations/loki.md b/charts/k8s-monitoring/docs/destinations/loki.md index 8d9fd56da..fc1cc5bb7 100644 --- a/charts/k8s-monitoring/docs/destinations/loki.md +++ b/charts/k8s-monitoring/docs/destinations/loki.md @@ -33,10 +33,10 @@ This defines the options for defining a destination for logs that use the Loki p | Key | Type | Default | Description | |-----|------|---------|-------------| -| externalLabels | object | `{}` | Custom labels to be added to all logs and events. All values are treated as strings and automatically quoted. | -| externalLabelsFrom | object | `{}` | Custom labels to be added to all logs and events through a dynamic reference. All values are treated as raw strings and not quoted. | | extraHeaders | object | `{}` | Extra headers to be set when sending data. All values are treated as strings and automatically quoted. | | extraHeadersFrom | object | `{}` | Extra headers to be set when sending data through a dynamic reference. All values are treated as raw strings and not quoted. | | extraLabels | object | `{}` | Custom labels to be added to all logs and events. All values are treated as strings and automatically quoted. | | extraLabelsFrom | object | `{}` | Custom labels to be added to all logs and events through a dynamic reference. All values are treated as raw strings and not quoted. | | name | string | `""` | The name for this Loki destination. | | proxyURL | string | `""` | The Proxy URL for the Loki destination. | | tenantId | string | `""` | The tenant ID for the Loki destination.
| diff --git a/charts/k8s-monitoring/docs/destinations/otlp.md b/charts/k8s-monitoring/docs/destinations/otlp.md index d8a51fd22..8832d7446 100644 --- a/charts/k8s-monitoring/docs/destinations/otlp.md +++ b/charts/k8s-monitoring/docs/destinations/otlp.md @@ -37,7 +37,6 @@ This defines the options for defining a destination for OpenTelemetry data that | extraHeadersFrom | object | `{}` | Extra headers to be set when sending data through a dynamic reference. All values are treated as raw strings and not quoted. | | name | string | `""` | The name for this OTLP destination. | | protocol | string | `"grpc"` | The protocol for the OTLP destination. Options are "grpc" (default), "http". | -| proxyURL | string | `""` | The Proxy URL for the OTLP destination. | | readBufferSize | string | `""` | Size of the read buffer the gRPC client to use for reading server responses. | | tenantId | string | `""` | The tenant ID for the OTLP destination. | | tenantIdFrom | string | `""` | Raw config for accessing the tenant ID. | diff --git a/charts/k8s-monitoring/docs/destinations/prometheus.md b/charts/k8s-monitoring/docs/destinations/prometheus.md index c904762ac..da236bfc5 100644 --- a/charts/k8s-monitoring/docs/destinations/prometheus.md +++ b/charts/k8s-monitoring/docs/destinations/prometheus.md @@ -23,11 +23,25 @@ This defines the options for defining a destination for metrics that use the Pro | auth.usernameFrom | string | `""` | Raw config for accessing the username. | | auth.usernameKey | string | `"username"` | The key for storing the username in the secret. | +### Authentication - SigV4 + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| auth.sigv4.accessKey | string | `""` | The access key for sigv4 authentication. | +| auth.sigv4.accessKeyFrom | string | `""` | Raw config for accessing the access key. | +| auth.sigv4.accessKeyKey | string | `"accessKey"` | The key for storing the access key in the secret. | +| auth.sigv4.profile | string | `""` | The named AWS profile for sigv4 authentication. | +| auth.sigv4.region | string | `""` | The AWS region for sigv4 authentication. | +| auth.sigv4.roleArn | string | `""` | The Role ARN for sigv4 authentication. | +| auth.sigv4.secretKey | string | `""` | The secret key for sigv4 authentication. | +| auth.sigv4.secretKeyFrom | string | `""` | Raw config for accessing the secret key. | +| auth.sigv4.secretKeyKey | string | `"secretKey"` | The key for storing the secret key in the secret. | + ### Authentication | Key | Type | Default | Description | |-----|------|---------|-------------| -| auth.type | string | none | The type of authentication to do. Options are "none" (default), "basic", "bearerToken". | +| auth.type | string | none | The type of authentication to do. Options are "none" (default), "basic", "bearerToken", "sigv4". | ### General diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/README.md b/charts/k8s-monitoring/docs/examples/auth/bearer-token/README.md new file mode 100644 index 000000000..9a76476df --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/README.md @@ -0,0 +1,74 @@ + +# Bearer Token Authentication + +This example demonstrates how to use a bearer token for authentication. The Prometheus destination defines the bearer +token inside the values file.
The Loki destination gets a bearer token from an environment variable defined on the +`alloy-logs` collector. And the OTLP destination gets a bearer token from a pre-existing Kubernetes secret. + +## Values + +```yaml +--- +cluster: + name: bearer-token-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: bearerToken + bearerToken: sample-bearer-token + + - name: loki + type: loki + url: http://loki.loki.svc:3100/loki/api/v1/push + auth: + type: bearerToken + bearerTokenFrom: env("LOKI_BEARER_TOKEN") + + - name: tempo + type: otlp + url: http://tempo.tempo.svc:4317 + auth: + type: bearerToken + bearerTokenKey: tempoBearerToken + secret: + create: false + name: my-tempo-secret + namespace: tempo + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +prometheusOperatorObjects: + enabled: true + +podLogs: + enabled: true + +alloy-metrics: + enabled: true + +alloy-logs: + enabled: true + alloy: + extraEnv: + - name: LOKI_BEARER_TOKEN + value: sample-bearer-token + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +``` diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-logs.alloy b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-logs.alloy new file mode 100644 index 000000000..7e70387d5 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-logs.alloy @@ -0,0 +1,130 @@ +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + bearer_token = env("LOKI_BEARER_TOKEN") + } + external_labels = { + cluster = "bearer-token-example-cluster", + "k8s_cluster_name" = "bearer-token-example-cluster", + } +} +// Feature: Pod Logs +declare "pod_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + discovery.relabel "filtered_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "$1" + target_label = "job" + } + + // set the container runtime as a label + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } + } + + discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = 
local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. + stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + forward_to = argument.logs_destinations.value + } +} +pod_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy new file mode 100644 index 000000000..27e1afde2 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy @@ -0,0 +1,95 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + bearer_token = remote.kubernetes.secret.prometheus.data["bearerToken"] + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + } +} + +remote.kubernetes.secret "prometheus" { + name = "prometheus-ko-k8s-monitoring" + namespace = "default" +} + + +// Feature: Prometheus Operator Objects +declare "prometheus_operator_objects" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + // Prometheus Operator podMonitor objects + prometheus.operator.podmonitors "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator podMonitor objects + prometheus.operator.probes "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = 
argument.metrics_destinations.value + } + + // Prometheus Operator ServiceMonitor objects + prometheus.operator.servicemonitors "service_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } +} +prometheus_operator_objects "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy new file mode 100644 index 000000000..9de0f57ed --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy @@ -0,0 +1,223 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + bearer_token = remote.kubernetes.secret.prometheus.data["bearerToken"] + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + } +} + +remote.kubernetes.secret "prometheus" { + name = "prometheus-ko-k8s-monitoring" + namespace = "default" +} + +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + bearer_token = env("LOKI_BEARER_TOKEN") + } + external_labels = { + cluster = "bearer-token-example-cluster", + "k8s_cluster_name" = "bearer-token-example-cluster", + } +} +// Destination: tempo (otlp) +otelcol.auth.bearer "tempo" { + token = remote.kubernetes.secret.tempo.data["tempoBearerToken"] +} + +otelcol.processor.transform "tempo" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"bearer-token-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + log_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"bearer-token-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + trace_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"bearer-token-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + + output { + metrics = [otelcol.exporter.otlp.tempo.input] + logs = [otelcol.exporter.otlp.tempo.input] + traces = [otelcol.exporter.otlp.tempo.input] + } +} +otelcol.exporter.otlp "tempo" { + client { + endpoint = "http://tempo.tempo.svc:4317" + headers = { + "X-Scope-OrgID" = 
nonsensitive(remote.kubernetes.secret.tempo.data["tenantId"]), + } + tls { + insecure = false + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.tempo.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.tempo.data["cert"]) + key_pem = remote.kubernetes.secret.tempo.data["key"] + } + } +} + +remote.kubernetes.secret "tempo" { + name = "my-tempo-secret" + namespace = "tempo" +} + + +// Feature: Application Observability +declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } +} +application_observability "feature" { + metrics_destinations = [ + otelcol.exporter.prometheus.prometheus.input, + ] + logs_destinations = [ + otelcol.exporter.loki.loki.input, + ] 
+ traces_destinations = [ + otelcol.processor.transform.tempo.input, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/description.txt b/charts/k8s-monitoring/docs/examples/auth/bearer-token/description.txt new file mode 100644 index 000000000..b59b2909f --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/description.txt @@ -0,0 +1,5 @@ +# Bearer Token Authentication + +This example demonstrates how to use a bearer token for authentication. The Prometheus destination defines the bearer +token inside the values file. The Loki destination gets a bearer token from an environment variable defined on the +`alloy-logs` collector. And the OTLP destination gets a bearer token from a pre-existing Kubernetes secret. diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml b/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml new file mode 100644 index 000000000..f4606c740 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml @@ -0,0 +1,1345 @@ +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-logs + namespace: default + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-metrics + namespace: default + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-receiver + namespace: default + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/templates/destination_secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: "prometheus-ko-k8s-monitoring" + namespace: "default" +type: Opaque +data: + bearerToken: "c2FtcGxlLWJlYXJlci10b2tlbg==" +--- +# Source: k8s-monitoring/templates/alloy-logs-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-logs + namespace: default +data: + config.alloy: |- + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + bearer_token = env("LOKI_BEARER_TOKEN") + } + external_labels = { + cluster = "bearer-token-example-cluster", + "k8s_cluster_name" = "bearer-token-example-cluster", + } + } + // Feature: Pod Logs + declare "pod_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + discovery.relabel "filtered_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + 
rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "$1" + target_label = "job" + } + + // set the container runtime as a label + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } + } + + discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. 
+ stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + forward_to = argument.logs_destinations.value + } + } + pod_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] + } +--- +# Source: k8s-monitoring/templates/alloy-metrics-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-metrics + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + bearer_token = remote.kubernetes.secret.prometheus.data["bearerToken"] + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + } + } + + remote.kubernetes.secret "prometheus" { + name = "prometheus-ko-k8s-monitoring" + namespace = "default" + } + + + // Feature: Prometheus Operator Objects + declare "prometheus_operator_objects" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + // Prometheus Operator podMonitor objects + prometheus.operator.podmonitors "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator podMonitor objects + prometheus.operator.probes "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator ServiceMonitor objects + prometheus.operator.servicemonitors "service_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + } + prometheus_operator_objects "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] + } +--- +# Source: k8s-monitoring/templates/alloy-receiver-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-receiver + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + bearer_token = remote.kubernetes.secret.prometheus.data["bearerToken"] + tls_config { + insecure_skip_verify = false + 
ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "bearer-token-example-cluster" + target_label = "cluster" + } + } + } + + remote.kubernetes.secret "prometheus" { + name = "prometheus-ko-k8s-monitoring" + namespace = "default" + } + + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + bearer_token = env("LOKI_BEARER_TOKEN") + } + external_labels = { + cluster = "bearer-token-example-cluster", + "k8s_cluster_name" = "bearer-token-example-cluster", + } + } + // Destination: tempo (otlp) + otelcol.auth.bearer "tempo" { + token = remote.kubernetes.secret.tempo.data["tempoBearerToken"] + } + + otelcol.processor.transform "tempo" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"bearer-token-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + log_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"bearer-token-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + trace_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"bearer-token-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + + output { + metrics = [otelcol.exporter.otlp.tempo.input] + logs = [otelcol.exporter.otlp.tempo.input] + traces = [otelcol.exporter.otlp.tempo.input] + } + } + otelcol.exporter.otlp "tempo" { + client { + endpoint = "http://tempo.tempo.svc:4317" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.tempo.data["tenantId"]), + } + tls { + insecure = false + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.tempo.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.tempo.data["cert"]) + key_pem = remote.kubernetes.secret.tempo.data["key"] + } + } + } + + remote.kubernetes.secret "tempo" { + name = "my-tempo-secret" + namespace = "tempo" + } + + + // Feature: Application Observability + declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + 
traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } + } + application_observability "feature" { + metrics_destinations = [ + otelcol.exporter.prometheus.prometheus.input, + ] + logs_destinations = [ + otelcol.exporter.loki.loki.input, + ] + traces_destinations = [ + otelcol.processor.transform.tempo.input, + ] + } +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. 
+ - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
+ - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-logs +subjects: + - kind: ServiceAccount + name: ko-alloy-logs + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-metrics +subjects: + - kind: ServiceAccount + name: ko-alloy-metrics + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-receiver +subjects: + - kind: ServiceAccount + name: ko-alloy-receiver + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + 
protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics-cluster + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + clusterIP: 'None' + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + ports: + # Do not include the -metrics suffix in the port name, otherwise metrics + # can be double-collected with the non-headless Service if it's also + # enabled. + # + # This service should only be used for clustering, and not metric + # collection. + - name: http + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-logs + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - + name: LOKI_BEARER_TOKEN + value: sample-bearer-token + ports: + - containerPort: 12345 + name: http-metrics + 
readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: varlog + mountPath: /var/log + readOnly: true + - name: dockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-logs + - name: varlog + hostPath: + path: /var/log + - name: dockercontainers + hostPath: + path: /var/lib/docker/containers +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-receiver + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + - containerPort: 4317 + name: otlp-grpc + protocol: TCP + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-receiver +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + 
app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + replicas: 1 + podManagementPolicy: Parallel + minReadySeconds: 10 + serviceName: ko-alloy-metrics + selector: + matchLabels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + k8s.grafana.com/logs.job: integrations/alloy + labels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-metrics + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --cluster.enabled=true + - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.name="alloy-metrics" + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/values.yaml b/charts/k8s-monitoring/docs/examples/auth/bearer-token/values.yaml new file mode 100644 index 000000000..ba46b3caf --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/values.yaml @@ -0,0 +1,60 @@ +--- +cluster: + name: bearer-token-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: bearerToken + bearerToken: sample-bearer-token + + - name: loki + type: loki + url: http://loki.loki.svc:3100/loki/api/v1/push + auth: + type: bearerToken + bearerTokenFrom: env("LOKI_BEARER_TOKEN") + + - name: tempo + type: otlp + url: http://tempo.tempo.svc:4317 + auth: + type: bearerToken + bearerTokenKey: tempoBearerToken + secret: + create: false + name: my-tempo-secret + namespace: tempo + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +prometheusOperatorObjects: + enabled: true + +podLogs: + enabled: true + +alloy-metrics: + enabled: true + +alloy-logs: + enabled: true + alloy: + extraEnv: + - name: LOKI_BEARER_TOKEN + value: sample-bearer-token + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/README.md 
b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/README.md new file mode 100644 index 000000000..c6ad90110 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/README.md @@ -0,0 +1,75 @@ + +# Embedded Secrets Example + +This example shows how setting the `secret.embed = true` flag will skip creating Kubernetes Secrets and will embed the +secret data directly into the destination configuration. + +## Values + +```yaml +--- +cluster: + name: embedded-secrets-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: sigv4 + sigv4: + region: ap-southeast-2 + accessKey: my-access-key + secretKey: my-secret-key + secret: + embed: true + + - name: loki + type: loki + url: http://loki.loki.svc:3100/loki/api/v1/push + auth: + type: bearerToken + bearerToken: my-bearer-token + secret: + embed: true + + - name: tempo + type: otlp + url: http://tempo.tempo.svc:4317 + auth: + type: basic + username: my-username + password: my-password + secret: + embed: true + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +prometheusOperatorObjects: + enabled: true + +podLogs: + enabled: true + +alloy-metrics: + enabled: true + +alloy-logs: + enabled: true + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +``` diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-logs.alloy b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-logs.alloy new file mode 100644 index 000000000..da37bd803 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-logs.alloy @@ -0,0 +1,131 @@ +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = "" + bearer_token = "my-bearer-token" + } + external_labels = { + cluster = "embedded-secrets-example-cluster", + "k8s_cluster_name" = "embedded-secrets-example-cluster", + } +} +// Feature: Pod Logs +declare "pod_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + discovery.relabel "filtered_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "$1" + target_label = "job" + } + + // set the container runtime as a label + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } + } + + discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = 
"/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. + stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + forward_to = argument.logs_destinations.value + } +} +pod_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy new file mode 100644 index 000000000..4cf9b4f97 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy @@ -0,0 +1,93 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + sigv4 { + access_key = "my-access-key" + region = "ap-southeast-2" + secret_key = "my-secret-key" + } + tls_config { + insecure_skip_verify = false + ca_pem = "" + cert_pem = "" + key_pem = "" + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + } +} + + +// Feature: Prometheus Operator Objects +declare "prometheus_operator_objects" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + // Prometheus Operator podMonitor objects + prometheus.operator.podmonitors "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator podMonitor objects + prometheus.operator.probes "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator ServiceMonitor objects + 
prometheus.operator.servicemonitors "service_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } +} +prometheus_operator_objects "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-receiver.alloy new file mode 100644 index 000000000..f9dba560e --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-receiver.alloy @@ -0,0 +1,217 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + sigv4 { + access_key = "my-access-key" + region = "ap-southeast-2" + secret_key = "my-secret-key" + } + tls_config { + insecure_skip_verify = false + ca_pem = "" + cert_pem = "" + key_pem = "" + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + } +} +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = "" + bearer_token = "my-bearer-token" + } + external_labels = { + cluster = "embedded-secrets-example-cluster", + "k8s_cluster_name" = "embedded-secrets-example-cluster", + } +} +// Destination: tempo (otlp) +otelcol.auth.basic "tempo" { + username = "my-username" + password = "my-password" +} + +otelcol.processor.transform "tempo" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"embedded-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + log_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"embedded-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + trace_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"embedded-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + + output { + metrics = [otelcol.exporter.otlp.tempo.input] + logs = [otelcol.exporter.otlp.tempo.input] + traces = [otelcol.exporter.otlp.tempo.input] + } +} +otelcol.exporter.otlp "tempo" { + client { + endpoint = "http://tempo.tempo.svc:4317" + headers = { + "X-Scope-OrgID" = "", + } + tls { + insecure = false + insecure_skip_verify = false + ca_pem = "" + cert_pem = "" + key_pem = "" + } + } +} + + +// Feature: Application Observability +declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log 
destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } +} +application_observability "feature" { + metrics_destinations = [ + otelcol.exporter.prometheus.prometheus.input, + ] + logs_destinations = [ + otelcol.exporter.loki.loki.input, + ] + traces_destinations = [ + otelcol.processor.transform.tempo.input, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/description.txt b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/description.txt new file mode 100644 index 000000000..f85762348 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/description.txt @@ -0,0 +1,4 @@ +# Embedded Secrets Example + +This example shows how setting the `secret.embed = true` flag will skip creating Kubernetes Secrets and will embed the +secret data directly into the destination configuration. 
diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml new file mode 100644 index 000000000..34229b355 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml @@ -0,0 +1,1325 @@ +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-logs + namespace: default + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-metrics + namespace: default + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-receiver + namespace: default + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/templates/alloy-logs-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-logs + namespace: default +data: + config.alloy: |- + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = "" + bearer_token = "my-bearer-token" + } + external_labels = { + cluster = "embedded-secrets-example-cluster", + "k8s_cluster_name" = "embedded-secrets-example-cluster", + } + } + // Feature: Pod Logs + declare "pod_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + discovery.relabel "filtered_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "$1" + target_label = "job" + } + + // set the container runtime as a label + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } + } + + discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", 
"__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. + stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + forward_to = argument.logs_destinations.value + } + } + pod_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] + } +--- +# Source: k8s-monitoring/templates/alloy-metrics-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-metrics + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + sigv4 { + access_key = "my-access-key" + region = "ap-southeast-2" + secret_key = "my-secret-key" + } + tls_config { + insecure_skip_verify = false + ca_pem = "" + cert_pem = "" + key_pem = "" + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + } + } + + + // Feature: Prometheus Operator Objects + declare "prometheus_operator_objects" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + // Prometheus Operator podMonitor objects + prometheus.operator.podmonitors "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator podMonitor objects + prometheus.operator.probes "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator ServiceMonitor objects + prometheus.operator.servicemonitors 
"service_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + } + prometheus_operator_objects "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] + } +--- +# Source: k8s-monitoring/templates/alloy-receiver-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-receiver + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + sigv4 { + access_key = "my-access-key" + region = "ap-southeast-2" + secret_key = "my-secret-key" + } + tls_config { + insecure_skip_verify = false + ca_pem = "" + cert_pem = "" + key_pem = "" + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "embedded-secrets-example-cluster" + target_label = "cluster" + } + } + } + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = "" + bearer_token = "my-bearer-token" + } + external_labels = { + cluster = "embedded-secrets-example-cluster", + "k8s_cluster_name" = "embedded-secrets-example-cluster", + } + } + // Destination: tempo (otlp) + otelcol.auth.basic "tempo" { + username = "my-username" + password = "my-password" + } + + otelcol.processor.transform "tempo" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"embedded-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + log_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"embedded-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + trace_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"embedded-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + + output { + metrics = [otelcol.exporter.otlp.tempo.input] + logs = [otelcol.exporter.otlp.tempo.input] + traces = [otelcol.exporter.otlp.tempo.input] + } + } + otelcol.exporter.otlp "tempo" { + client { + endpoint = "http://tempo.tempo.svc:4317" + headers = { + "X-Scope-OrgID" = "", + } + tls { + insecure = false + insecure_skip_verify = false + ca_pem = "" + cert_pem = "" + key_pem = "" + } + } + } + + + // Feature: Application Observability + declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected 
trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } + } + application_observability "feature" { + metrics_destinations = [ + otelcol.exporter.prometheus.prometheus.input, + ] + logs_destinations = [ + otelcol.exporter.loki.loki.input, + ] + traces_destinations = [ + otelcol.processor.transform.tempo.input, + ] + } +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. 
+ - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-logs +subjects: + - kind: ServiceAccount + name: ko-alloy-logs + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-metrics +subjects: + - kind: ServiceAccount + name: ko-alloy-metrics + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-receiver +subjects: + - kind: ServiceAccount + name: ko-alloy-receiver + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics-cluster + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + clusterIP: 'None' + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + ports: + # Do not include the -metrics 
suffix in the port name, otherwise metrics + # can be double-collected with the non-headless Service if it's also + # enabled. + # + # This service should only be used for clustering, and not metric + # collection. + - name: http + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-logs + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: varlog + mountPath: /var/log + readOnly: true + - name: dockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + 
- --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-logs + - name: varlog + hostPath: + path: /var/log + - name: dockercontainers + hostPath: + path: /var/lib/docker/containers +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-receiver + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + - containerPort: 4317 + name: otlp-grpc + protocol: TCP + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-receiver +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + replicas: 1 + podManagementPolicy: Parallel + minReadySeconds: 10 + serviceName: ko-alloy-metrics + selector: + matchLabels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + k8s.grafana.com/logs.job: integrations/alloy + labels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-metrics + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + 
imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --cluster.enabled=true + - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.name="alloy-metrics" + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/values.yaml b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/values.yaml new file mode 100644 index 000000000..c8dee91b7 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/values.yaml @@ -0,0 +1,62 @@ +--- +cluster: + name: embedded-secrets-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: sigv4 + sigv4: + region: ap-southeast-2 + accessKey: my-access-key + secretKey: my-secret-key + secret: + embed: true + + - name: loki + type: loki + url: http://loki.loki.svc:3100/loki/api/v1/push + auth: + type: bearerToken + bearerToken: my-bearer-token + secret: + embed: true + + - name: tempo + type: otlp + url: http://tempo.tempo.svc:4317 + auth: + type: basic + username: my-username + password: my-password + secret: + embed: true + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +prometheusOperatorObjects: + enabled: true + +podLogs: + enabled: true + +alloy-metrics: + enabled: true + +alloy-logs: + enabled: true + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/README.md b/charts/k8s-monitoring/docs/examples/auth/external-secrets/README.md new file mode 100644 index 000000000..b2bc0f660 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/README.md @@ -0,0 +1,79 @@ + +# Authentication with Pre-existing Secrets + +This example demonstrates how to use pre-existing secrets to authenticate to external services. This allows for +credentials to be stored in different secret stores, as long as it resolves to a Kubernetes Secret. 
+ +## Values + +```yaml +--- +cluster: + name: external-secrets-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: basic + usernameKey: prom-username + passwordKey: access-token + secret: + create: false + name: my-monitoring-secret + namespace: monitoring + + - name: loki + type: loki + url: http://loki.loki.svc:3100/loki/api/v1/push + auth: + type: basic + usernameKey: loki-username + passwordKey: access-token + secret: + create: false + name: my-monitoring-secret + namespace: monitoring + + - name: tempo + type: otlp + url: http://tempo.tempo.svc:4317 + auth: + type: bearerToken + bearerTokenKey: tempoBearerToken + secret: + create: false + name: my-tempo-secret + namespace: tempo + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +prometheusOperatorObjects: + enabled: true + +podLogs: + enabled: true + +alloy-metrics: + enabled: true + +alloy-logs: + enabled: true + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +``` diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-logs.alloy b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-logs.alloy new file mode 100644 index 000000000..8bbd0014c --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-logs.alloy @@ -0,0 +1,139 @@ +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = nonsensitive(remote.kubernetes.secret.loki.data["tenantId"]) + basic_auth { + username = nonsensitive(remote.kubernetes.secret.loki.data["loki-username"]) + password = remote.kubernetes.secret.loki.data["access-token"] + } + } + external_labels = { + cluster = "external-secrets-example-cluster", + "k8s_cluster_name" = "external-secrets-example-cluster", + } +} + +remote.kubernetes.secret "loki" { + name = "my-monitoring-secret" + namespace = "monitoring" +} +// Feature: Pod Logs +declare "pod_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + discovery.relabel "filtered_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "$1" + target_label = "job" + } + + // set the container runtime as a label + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } + } + + discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = 
"/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. + stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + forward_to = argument.logs_destinations.value + } +} +pod_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy new file mode 100644 index 000000000..cb331d423 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy @@ -0,0 +1,98 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + basic_auth { + username = nonsensitive(remote.kubernetes.secret.prometheus.data["prom-username"]) + password = remote.kubernetes.secret.prometheus.data["access-token"] + } + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + } +} + +remote.kubernetes.secret "prometheus" { + name = "my-monitoring-secret" + namespace = "monitoring" +} + + +// Feature: Prometheus Operator Objects +declare "prometheus_operator_objects" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + // Prometheus Operator podMonitor objects + prometheus.operator.podmonitors "pod_monitors" { + 
clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator podMonitor objects + prometheus.operator.probes "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator ServiceMonitor objects + prometheus.operator.servicemonitors "service_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } +} +prometheus_operator_objects "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy new file mode 100644 index 000000000..e1461cb02 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-receiver.alloy @@ -0,0 +1,236 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + basic_auth { + username = nonsensitive(remote.kubernetes.secret.prometheus.data["prom-username"]) + password = remote.kubernetes.secret.prometheus.data["access-token"] + } + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + } +} + +remote.kubernetes.secret "prometheus" { + name = "my-monitoring-secret" + namespace = "monitoring" +} + +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = nonsensitive(remote.kubernetes.secret.loki.data["tenantId"]) + basic_auth { + username = nonsensitive(remote.kubernetes.secret.loki.data["loki-username"]) + password = remote.kubernetes.secret.loki.data["access-token"] + } + } + external_labels = { + cluster = "external-secrets-example-cluster", + "k8s_cluster_name" = "external-secrets-example-cluster", + } +} + +remote.kubernetes.secret "loki" { + name = "my-monitoring-secret" + namespace = "monitoring" +} + +// Destination: tempo (otlp) +otelcol.auth.bearer "tempo" { + token = remote.kubernetes.secret.tempo.data["tempoBearerToken"] +} + +otelcol.processor.transform "tempo" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], 
\"external-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + log_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"external-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + trace_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"external-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + + output { + metrics = [otelcol.exporter.otlp.tempo.input] + logs = [otelcol.exporter.otlp.tempo.input] + traces = [otelcol.exporter.otlp.tempo.input] + } +} +otelcol.exporter.otlp "tempo" { + client { + endpoint = "http://tempo.tempo.svc:4317" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.tempo.data["tenantId"]), + } + tls { + insecure = false + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.tempo.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.tempo.data["cert"]) + key_pem = remote.kubernetes.secret.tempo.data["key"] + } + } +} + +remote.kubernetes.secret "tempo" { + name = "my-tempo-secret" + namespace = "tempo" +} + + +// Feature: Application Observability +declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + 
"set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } +} +application_observability "feature" { + metrics_destinations = [ + otelcol.exporter.prometheus.prometheus.input, + ] + logs_destinations = [ + otelcol.exporter.loki.loki.input, + ] + traces_destinations = [ + otelcol.processor.transform.tempo.input, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/description.txt b/charts/k8s-monitoring/docs/examples/auth/external-secrets/description.txt new file mode 100644 index 000000000..d06aabb36 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/description.txt @@ -0,0 +1,4 @@ +# Authentication with Pre-existing Secrets + +This example demonstrates how to use pre-existing secrets to authenticate to external services. This allows for +credentials to be stored in different secret stores, as long as it resolves to a Kubernetes Secret. diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml b/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml new file mode 100644 index 000000000..a5dbe636d --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml @@ -0,0 +1,1357 @@ +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-logs + namespace: default + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-metrics + namespace: default + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ko-alloy-receiver + namespace: default + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/templates/alloy-logs-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-logs + namespace: default +data: + config.alloy: |- + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = nonsensitive(remote.kubernetes.secret.loki.data["tenantId"]) + basic_auth { + username = 
nonsensitive(remote.kubernetes.secret.loki.data["loki-username"]) + password = remote.kubernetes.secret.loki.data["access-token"] + } + } + external_labels = { + cluster = "external-secrets-example-cluster", + "k8s_cluster_name" = "external-secrets-example-cluster", + } + } + + remote.kubernetes.secret "loki" { + name = "my-monitoring-secret" + namespace = "monitoring" + } + // Feature: Pod Logs + declare "pod_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + discovery.relabel "filtered_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "$1" + target_label = "job" + } + + // set the container runtime as a label + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } + } + + discovery.kubernetes "pods" { + role = "pod" + selectors { + role = "pod" + field = "spec.nodeName=" + env("HOSTNAME") + } + } + + discovery.relabel "filtered_pods_with_paths" { + targets = discovery.relabel.filtered_pods.output + + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + separator = "/" + action = "replace" + replacement = "/var/log/pods/*$1/*.log" + target_label = "__path__" + } + } + + local.file_match "pod_logs" { + path_targets = discovery.relabel.filtered_pods_with_paths.output + } + + loki.source.file "pod_logs" { + targets = local.file_match.pod_logs.targets + forward_to = [loki.process.pod_logs.receiver] + } + + loki.process "pod_logs" { + stage.match { + selector = "{tmp_container_runtime=~\"containerd|cri-o\"}" + // the cri processing stage extracts the following k/v pairs: log, stream, time, flags + stage.cri {} + + // Set the extract flags and stream values as labels + stage.labels { + values = { + flags = "", + stream = "", + } + } + } + + stage.match { + selector = "{tmp_container_runtime=\"docker\"}" + // the docker processing stage extracts the following k/v pairs: log, stream, time + stage.docker {} + + // Set the extract stream value as a label + stage.labels { + values = { + stream = "", + } + } + } + + // Drop the filename label, since it's not really useful in the context of Kubernetes, where we already have cluster, + // namespace, pod, and container labels. Drop any structured metadata. Also drop the temporary + // container runtime label as it is no longer needed. 
+ stage.label_drop { + values = [ + "filename", + "tmp_container_runtime", + ] + } + forward_to = argument.logs_destinations.value + } + } + pod_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] + } +--- +# Source: k8s-monitoring/templates/alloy-metrics-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-metrics + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + basic_auth { + username = nonsensitive(remote.kubernetes.secret.prometheus.data["prom-username"]) + password = remote.kubernetes.secret.prometheus.data["access-token"] + } + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + } + } + + remote.kubernetes.secret "prometheus" { + name = "my-monitoring-secret" + namespace = "monitoring" + } + + + // Feature: Prometheus Operator Objects + declare "prometheus_operator_objects" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + // Prometheus Operator podMonitor objects + prometheus.operator.podmonitors "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator podMonitor objects + prometheus.operator.probes "pod_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + + // Prometheus Operator ServiceMonitor objects + prometheus.operator.servicemonitors "service_monitors" { + clustering { + enabled = true + } + scrape { + default_scrape_interval = "60s" + } + forward_to = argument.metrics_destinations.value + } + } + prometheus_operator_objects "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus.receiver, + ] + } +--- +# Source: k8s-monitoring/templates/alloy-receiver-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ko-alloy-receiver + namespace: default +data: + config.alloy: |- + // Destination: prometheus (prometheus) + otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] + } + + prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.prometheus.data["tenantId"]), + } + basic_auth { + 
username = nonsensitive(remote.kubernetes.secret.prometheus.data["prom-username"]) + password = remote.kubernetes.secret.prometheus.data["access-token"] + } + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "external-secrets-example-cluster" + target_label = "cluster" + } + } + } + + remote.kubernetes.secret "prometheus" { + name = "my-monitoring-secret" + namespace = "monitoring" + } + + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/loki/api/v1/push" + tenant_id = nonsensitive(remote.kubernetes.secret.loki.data["tenantId"]) + basic_auth { + username = nonsensitive(remote.kubernetes.secret.loki.data["loki-username"]) + password = remote.kubernetes.secret.loki.data["access-token"] + } + } + external_labels = { + cluster = "external-secrets-example-cluster", + "k8s_cluster_name" = "external-secrets-example-cluster", + } + } + + remote.kubernetes.secret "loki" { + name = "my-monitoring-secret" + namespace = "monitoring" + } + + // Destination: tempo (otlp) + otelcol.auth.bearer "tempo" { + token = remote.kubernetes.secret.tempo.data["tempoBearerToken"] + } + + otelcol.processor.transform "tempo" { + error_mode = "ignore" + metric_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"external-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + log_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"external-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + trace_statements { + context = "resource" + statements = ["set(attributes[\"k8s.cluster.name\"], \"external-secrets-example-cluster\") where attributes[\"k8s.cluster.name\"] == nil"] + } + + output { + metrics = [otelcol.exporter.otlp.tempo.input] + logs = [otelcol.exporter.otlp.tempo.input] + traces = [otelcol.exporter.otlp.tempo.input] + } + } + otelcol.exporter.otlp "tempo" { + client { + endpoint = "http://tempo.tempo.svc:4317" + headers = { + "X-Scope-OrgID" = nonsensitive(remote.kubernetes.secret.tempo.data["tenantId"]), + } + tls { + insecure = false + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.tempo.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.tempo.data["cert"]) + key_pem = remote.kubernetes.secret.tempo.data["key"] + } + } + } + + remote.kubernetes.secret "tempo" { + name = "my-tempo-secret" + namespace = "tempo" + } + + + // Feature: Application Observability + declare "application_observability" { + argument "metrics_destinations" { + comment = "Must be a list of metrics destinations where collected metrics should be forwarded to" + } + + argument "logs_destinations" { + comment = "Must be a list of log 
destinations where collected logs should be forwarded to" + } + + argument "traces_destinations" { + comment = "Must be a list of trace destinations where collected trace should be forwarded to" + } + + // Receivers --> Resource Detection Processor + otelcol.receiver.otlp "receiver" { + grpc { + endpoint = "0.0.0.0:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + output { + metrics = [otelcol.processor.resourcedetection.default.input] + logs = [otelcol.processor.resourcedetection.default.input] + traces = [otelcol.processor.resourcedetection.default.input] + } + } + + // Resource Detection Processor --> K8s Attribute Processor + otelcol.processor.resourcedetection "default" { + detectors = ["env", "system"] + system { + hostname_sources = ["os"] + } + + output { + metrics = [otelcol.processor.k8sattributes.default.input] + logs = [otelcol.processor.k8sattributes.default.input] + traces = [otelcol.processor.k8sattributes.default.input] + } + } + + // K8s Attribute Processor --> Transform Processor + // Resource Detection Processor Traces --> Host Info Connector + otelcol.processor.k8sattributes "default" { + extract { + metadata = ["k8s.namespace.name","k8s.pod.name","k8s.deployment.name","k8s.statefulset.name","k8s.daemonset.name","k8s.cronjob.name","k8s.job.name","k8s.node.name","k8s.pod.uid","k8s.pod.start_time"] + } + pod_association { + source { + from = "connection" + } + } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input, otelcol.connector.host_info.default.input] + } + } + // Host Info Connector --> Batch Processor + otelcol.connector.host_info "default" { + host_identifiers = [ "k8s.node.name" ] + + output { + metrics = [otelcol.processor.batch.default.input] + } + } + + + // Transform Processor --> Batch Processor + otelcol.processor.transform "default" { + error_mode = "ignore" + log_statements { + context = "resource" + statements = [ + "set(attributes[\"pod\"], attributes[\"k8s.pod.name\"])", + "set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])", + "set(attributes[\"loki.resource.labels\"], \"cluster, namespace, job, pod\")", + ] + } + + output { + metrics = [otelcol.processor.batch.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch.default.input] + } + } + + // Batch Processor --> Destinations + otelcol.processor.batch "default" { + output { + metrics = argument.metrics_destinations.value + logs = argument.logs_destinations.value + traces = argument.traces_destinations.value + } + } + } + application_observability "feature" { + metrics_destinations = [ + otelcol.exporter.prometheus.prometheus.input, + ] + logs_destinations = [ + otelcol.exporter.loki.loki.input, + ] + traces_destinations = [ + otelcol.processor.transform.tempo.input, + ] + } +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. 
+ - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-logs +subjects: + - kind: ServiceAccount + name: ko-alloy-logs + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-metrics +subjects: + - kind: ServiceAccount + name: ko-alloy-metrics + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ko-alloy-receiver +subjects: + - kind: ServiceAccount + name: ko-alloy-receiver + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics-cluster + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + clusterIP: 'None' + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + ports: + # Do not include the -metrics 
suffix in the port name, otherwise metrics + # can be double-collected with the non-headless Service if it's also + # enabled. + # + # This service should only be used for clustering, and not metric + # collection. + - name: http + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.7.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-logs + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: varlog + mountPath: /var/log + readOnly: true + - name: dockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + 
- --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-logs + - name: varlog + hostPath: + path: /var/log + - name: dockercontainers + hostPath: + path: /var/lib/docker/containers +--- +# Source: k8s-monitoring/charts/alloy-receiver/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ko-alloy-receiver + labels: + helm.sh/chart: alloy-receiver-0.7.0 + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-receiver + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-receiver + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + - containerPort: 4317 + name: otlp-grpc + protocol: TCP + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-receiver +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: ko-alloy-metrics + labels: + helm.sh/chart: alloy-metrics-0.7.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + + app.kubernetes.io/version: "v1.3.1" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + replicas: 1 + podManagementPolicy: Parallel + minReadySeconds: 10 + serviceName: ko-alloy-metrics + selector: + matchLabels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + k8s.grafana.com/logs.job: integrations/alloy + labels: + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: ko + spec: + serviceAccountName: ko-alloy-metrics + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.3.1 + 
imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --cluster.enabled=true + - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.name="alloy-metrics" + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: ko-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/values.yaml b/charts/k8s-monitoring/docs/examples/auth/external-secrets/values.yaml new file mode 100644 index 000000000..c4caa32a9 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/values.yaml @@ -0,0 +1,66 @@ +--- +cluster: + name: external-secrets-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: basic + usernameKey: prom-username + passwordKey: access-token + secret: + create: false + name: my-monitoring-secret + namespace: monitoring + + - name: loki + type: loki + url: http://loki.loki.svc:3100/loki/api/v1/push + auth: + type: basic + usernameKey: loki-username + passwordKey: access-token + secret: + create: false + name: my-monitoring-secret + namespace: monitoring + + - name: tempo + type: otlp + url: http://tempo.tempo.svc:4317 + auth: + type: bearerToken + bearerTokenKey: tempoBearerToken + secret: + create: false + name: my-tempo-secret + namespace: tempo + +applicationObservability: + enabled: true + receivers: + grpc: + enabled: true + +prometheusOperatorObjects: + enabled: true + +podLogs: + enabled: true + +alloy-metrics: + enabled: true + +alloy-logs: + enabled: true + +alloy-receiver: + enabled: true + alloy: + extraPorts: + - name: otlp-grpc + port: 4317 + targetPort: 4317 + protocol: TCP diff --git a/charts/k8s-monitoring/docs/examples/auth/sigv4/README.md b/charts/k8s-monitoring/docs/examples/auth/sigv4/README.md new file mode 100644 index 000000000..4e2a404eb --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/sigv4/README.md @@ -0,0 +1,32 @@ + +# AWS Signature Version 4 Auth Example + +This example shows how to configure a Prometheus destination using the AWS Signature Version 4 authentication method. 
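+
+In this example, the AWS region, access key, and secret key are supplied under `auth.sigv4`. The credentials are
+not rendered directly into the Alloy configuration; they end up in a Kubernetes Secret (in this example's rendered
+output, `prometheus-ko-k8s-monitoring` in the `default` namespace) that the generated configuration reads at
+runtime. A rough sketch of the equivalent Secret, with placeholder values, is:
+
+```yaml
+# Approximate shape of the Secret referenced by the generated Alloy config; values are placeholders.
+apiVersion: v1
+kind: Secret
+metadata:
+  name: prometheus-ko-k8s-monitoring
+  namespace: default
+stringData:
+  accessKey: my-access-key
+  secretKey: my-secret-key
+```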
+ +## Values + +```yaml +--- +cluster: + name: sigv4-auth-example-cluster + +destinations: + - name: prometheus + type: prometheus + url: http://prometheus.prometheus.svc:9090/api/v1/write + auth: + type: sigv4 + sigv4: + region: ap-southeast-2 + accessKey: my-access-key + secretKey: my-secret-key + +clusterMetrics: + enabled: true + +alloy-metrics: + enabled: true +``` diff --git a/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy new file mode 100644 index 000000000..9357e2957 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy @@ -0,0 +1,228 @@ +// Destination: prometheus (prometheus) +otelcol.exporter.prometheus "prometheus" { + forward_to = [prometheus.remote_write.prometheus.receiver] +} + +prometheus.remote_write "prometheus" { + endpoint { + url = "http://prometheus.prometheus.svc:9090/api/v1/write" + headers = { + } + sigv4 { + access_key = nonsensitive(remote.kubernetes.secret.prometheus.data["accessKey"]) + region = "ap-southeast-2" + secret_key = remote.kubernetes.secret.prometheus.data["secretKey"] + } + tls_config { + insecure_skip_verify = false + ca_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["ca"]) + cert_pem = nonsensitive(remote.kubernetes.secret.prometheus.data["cert"]) + key_pem = remote.kubernetes.secret.prometheus.data["key"] + } + send_native_histograms = false + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "sigv4-auth-example-cluster" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "sigv4-auth-example-cluster" + target_label = "cluster" + } + } +} + +remote.kubernetes.secret "prometheus" { + name = "prometheus-ko-k8s-monitoring" + namespace = "default" +} +// Feature: Cluster Metrics +declare "cluster_metrics" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + import.git "kubernetes" { + repository = "https://github.com/grafana/alloy-modules.git" + revision = "main" + path = "modules/kubernetes/core/metrics.alloy" + pull_frequency = "15m" + } + + kubernetes.kubelet "scrape" { + clustering = true + keep_metrics = 
"up|container_cpu_usage_seconds_total|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_used|kubernetes_build_info|namespace_workload_pod|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = argument.metrics_destinations.value + } + + kubernetes.cadvisor "scrape" { + clustering = true + keep_metrics = "up|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = [prometheus.relabel.cadvisor.receiver] + } + + prometheus.relabel "cadvisor" { + max_cache_size = 100000 + // Drop empty container labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","container"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*)@" + action = "drop" + } + // Drop empty image labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","image"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*|container_network_.*)@" + action = "drop" + } + // Normalizing unimportant labels (not deleting to continue satisfying