diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e088d15..f653810 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -26,7 +26,6 @@ jobs: run: | helm repo add stable https://charts.helm.sh/stable helm repo add influxdb2 https://helm.influxdata.com/ - helm repo add kiwigrid https://kiwigrid.github.io helm repo add bitnami https://charts.bitnami.com/bitnami - name: Set up chart-testing diff --git a/charts/flagsmith/Chart.lock b/charts/flagsmith/Chart.lock index 0296d7d..9d1f30b 100644 --- a/charts/flagsmith/Chart.lock +++ b/charts/flagsmith/Chart.lock @@ -6,7 +6,7 @@ dependencies: repository: https://helm.influxdata.com/ version: 2.1.1 - name: graphite - repository: https://kiwigrid.github.io - version: 0.7.3 -digest: sha256:6ed1b4fab608bb1039a42040c445cfdf6a74a32ac80d4b1137f420e86f08e481 -generated: "2023-06-20T13:58:28.286957382+01:00" + repository: file://../graphite + version: 2.0.0 +digest: sha256:a978f9f6b4c171ddc773f38a1c83ea50198679825b6e27d3ba06caf7b4d561c1 +generated: "2025-11-14T15:58:27.463499Z" diff --git a/charts/flagsmith/Chart.yaml b/charts/flagsmith/Chart.yaml index c229238..621a503 100644 --- a/charts/flagsmith/Chart.yaml +++ b/charts/flagsmith/Chart.yaml @@ -15,7 +15,7 @@ dependencies: version: 2.1.1 condition: influxdb2.enabled - name: graphite - repository: https://kiwigrid.github.io - version: 0.7.3 + repository: file://../graphite + version: 2.0.0 condition: graphite.enabled icon: https://docs.flagsmith.com/img/square-icon.png diff --git a/charts/graphite/.helmignore b/charts/graphite/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/graphite/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/graphite/Chart.yaml b/charts/graphite/Chart.yaml new file mode 100644 index 0000000..ab7fc1d --- /dev/null +++ b/charts/graphite/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +version: 2.0.0 +appVersion: "1.1.10-3" +description: Graphite metrics server +name: graphite +home: https://graphiteapp.org/ diff --git a/charts/graphite/README.md b/charts/graphite/README.md new file mode 100644 index 0000000..9391888 --- /dev/null +++ b/charts/graphite/README.md @@ -0,0 +1,66 @@ +# Graphite + +[Graphite](https://graphiteapp.org/) is a monitoring tool. + +This chart was rescued from [here](https://github.com/kiwigrid/helm-charts) which is no longer hosted or supported. + +## Configuration + +The following table lists the configurable parameters of the Graphite chart and their default values. 
+
+| Parameter                      | Description                                   | Default                                 |
+|--------------------------------|-----------------------------------------------|-----------------------------------------|
+| `image.repository`             | Docker image repository                       | `graphiteapp/graphite-statsd`           |
+| `image.tag`                    | Docker image tag                              | `1.1.7-6`                               |
+| `image.pullPolicy`             | Docker image pull policy                      | `IfNotPresent`                          |
+| `service.type`                 | Service type                                  | `ClusterIP`                             |
+| `service.port`                 | Service port of the Graphite UI               | `8080`                                  |
+| `service.annotations`          | Service annotations                           | `{}`                                    |
+| `service.labels`               | Service labels                                | `{}`                                    |
+| `persistence.enabled`          | Enable data persistence using a PVC           | `true`                                  |
+| `persistence.storageClass`     | PVC Storage Class for the data volume         | `nil`                                   |
+| `persistence.existingClaim`    | Name of an existing PVC to use for data       | `nil`                                   |
+| `persistence.accessMode`       | PVC Access Mode for the data volume           | `ReadWriteOnce`                         |
+| `persistence.size`             | PVC Storage Request for the data volume       | `10Gi`                                  |
+| `resources`                    | Resource requests and limits for the Graphite pod | `{}`                               |
+| `ingress.enabled`              | Ingress enabled                               | `false`                                 |
+| `ingress.annotations`          | Ingress annotations                           | `{}`                                    |
+| `ingress.path`                 | Ingress path                                  | `/`                                     |
+| `ingress.hosts`                | Ingress hosts                                 | `[]`                                    |
+| `ingress.tls`                  | Ingress TLS                                   | `[]`                                    |
+| `nodeSelector`                 | Node selector                                 | `{}`                                    |
+| `tolerations`                  | Tolerations                                   | `[]`                                    |
+| `affinity`                     | Affinity                                      | `{}`                                    |
+| `env`                          | Environment variables passed to the pod       | `{}`                                    |
+| `timeZone`                     | Timezone                                      | `Etc/UTC`                               |
+| `initContainers`               | Init containers                               | `[]`                                    |
+| `configMaps`                   | Graphite config files                         | see values.yaml                         |
+| `statsdConfigMaps`             | StatsD config files                           | see values.yaml                         |
+| `statsd.interface`             | StatsD server interface, `TCP` or `UDP`       | `UDP`                                   |
+| `serviceAccount.accountName`   | Service account name                          | `graphite`                              |
+| `serviceAccount.enabled`       | Use the service account in the StatefulSet (note: the ServiceAccount object is only created when `serviceAccount.create` is also set) | `false` |
+| `serviceAccount.create`        | Create the service account from the template  | `false`                                 |
+| `rbac.create`                  | Enable RBAC rules                             | `false`                                 |
+| `psp.create`                   | Grant the role `use` access to the `pks-privileged` PodSecurityPolicy (only evaluated when `rbac.create` is `true`). WARNING: PodSecurityPolicy is deprecated from Kubernetes v1.21 and removed in v1.25. | |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, from the root of this repository:
+
+```bash
+$ helm install graphite --set ingress.enabled=false ./charts/graphite
+```
+
+Alternatively, a YAML file that specifies values for the parameters can be provided when installing the chart (see the example values file below).
+
+### Storage persistence
+
+Graphite is a stateful application that keeps all of its data in its own on-disk database, so the chart uses a PersistentVolumeClaim to persist that data.
+
+### Help
+
+For more information about Graphite, visit the official [website](https://graphiteapp.org/) and the [docs](http://graphite.readthedocs.io/en/latest/).
+
+For details about the underlying Docker image, see [GitHub](https://github.com/graphite-project/docker-graphite-statsd) or [Docker Hub](https://hub.docker.com/r/graphiteapp/graphite-statsd/).
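+
+### Example values file
+
+The snippet below is a minimal sketch of such an override file; the keys mirror the table above and the values are illustrative only, not recommendations:
+
+```yaml
+# my-values.yaml -- example overrides for the graphite chart
+persistence:
+  size: 20Gi
+ingress:
+  enabled: true
+  hosts:
+    - graphite.example.com
+statsd:
+  interface: TCP
+```
+
+```bash
+$ helm install graphite -f my-values.yaml ./charts/graphite
+```
diff --git a/charts/graphite/templates/NOTES.txt b/charts/graphite/templates/NOTES.txt
new file mode 100644
index 0000000..7b2f568
--- /dev/null
+++ b/charts/graphite/templates/NOTES.txt
@@ -0,0 +1,19 @@
+1. 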
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "graphite.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "graphite.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "graphite.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "graphite.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/charts/graphite/templates/_helpers.tpl b/charts/graphite/templates/_helpers.tpl new file mode 100644 index 0000000..8c57421 --- /dev/null +++ b/charts/graphite/templates/_helpers.tpl @@ -0,0 +1,39 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "graphite.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "graphite.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "graphite.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Define the service Account name +*/}} +{{- define "graphite.serviceAccount.name" -}} +{{ default "graphite" .Values.serviceAccount.accountName }} +{{- end -}} \ No newline at end of file diff --git a/charts/graphite/templates/configmap-statsd.yaml b/charts/graphite/templates/configmap-statsd.yaml new file mode 100644 index 0000000..d2b12a9 --- /dev/null +++ b/charts/graphite/templates/configmap-statsd.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "graphite.fullname" . }}-statsd-configmap + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: +{{- range $key, $value := .Values.statsdConfigMaps }} + {{ $key }}: |- +{{ $value | indent 4 }} +{{- end }} diff --git a/charts/graphite/templates/configmap.yaml b/charts/graphite/templates/configmap.yaml new file mode 100644 index 0000000..aa276e1 --- /dev/null +++ b/charts/graphite/templates/configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "graphite.fullname" . }}-configmap + labels: + app: {{ template "graphite.name" . }} + chart: {{ template "graphite.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{- range $key, $value := .Values.configMaps }} + {{ $key }}: |- +{{ $value | indent 4 }} +{{- end }} diff --git a/charts/graphite/templates/ingress.yaml b/charts/graphite/templates/ingress.yaml new file mode 100644 index 0000000..67c75ab --- /dev/null +++ b/charts/graphite/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "graphite.fullname" . -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $ingressPath }} + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $.Values.service.port }} + {{- end }} +{{- end }} diff --git a/charts/graphite/templates/pvc.yaml b/charts/graphite/templates/pvc.yaml new file mode 100644 index 0000000..ab122fe --- /dev/null +++ b/charts/graphite/templates/pvc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.persistence.enabled -}} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "graphite.fullname" . }}-pvc + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/graphite/templates/role.yaml b/charts/graphite/templates/role.yaml new file mode 100644 index 0000000..cdaa891 --- /dev/null +++ b/charts/graphite/templates/role.yaml @@ -0,0 +1,21 @@ +--- +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "graphite.serviceAccount.name" . }}-role + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - pks-privileged + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/graphite/templates/rolebinding.yaml b/charts/graphite/templates/rolebinding.yaml new file mode 100644 index 0000000..93edc2f --- /dev/null +++ b/charts/graphite/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +--- +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "graphite.serviceAccount.name" . }}-rb + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ default "graphite" .Values.serviceAccount.accountName }}-role +subjects: + - kind: ServiceAccount + name: {{ default "graphite" .Values.serviceAccount.accountName }} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/graphite/templates/service.yaml b/charts/graphite/templates/service.yaml new file mode 100644 index 0000000..abbdd51 --- /dev/null +++ b/charts/graphite/templates/service.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "graphite.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: graphite-pickle + port: 2004 + protocol: TCP + - name: graphite-plain + port: 2003 + protocol: TCP + - name: graphite-udp + port: 2003 + protocol: UDP + - name: graphite-gui + port: {{ .Values.service.port }} + protocol: TCP + - name: aggregate-plain + port: 2023 + protocol: TCP + - name: aggregate-pickl + port: 2024 + protocol: TCP + - name: statsd + port: 8125 + protocol: {{ .Values.statsd.interface }} + - name: statsd-admin + port: 8126 + protocol: TCP + selector: + app.kubernetes.io/name: {{ include "graphite.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/graphite/templates/serviceaccount.yaml b/charts/graphite/templates/serviceaccount.yaml new file mode 100644 index 0000000..8db318e --- /dev/null +++ b/charts/graphite/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and (.Values.serviceAccount.enabled) (.Values.serviceAccount.create) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "graphite.serviceAccount.name" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} diff --git a/charts/graphite/templates/statefulset.yaml b/charts/graphite/templates/statefulset.yaml new file mode 100644 index 0000000..deda150 --- /dev/null +++ b/charts/graphite/templates/statefulset.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "graphite.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + helm.sh/chart: {{ include "graphite.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + serviceName: {{ template "graphite.name" . }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "graphite.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: +{{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ include "graphite.serviceAccount.name" . }} +{{- end }} +{{- if .Values.initContainers }} + initContainers: +{{ toYaml .Values.initContainers | indent 8 }} +{{- end }} + containers: + - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + name: {{ .Chart.Name }} + ports: + - name: graphite-gui + containerPort: {{ .Values.service.port }} + - name: graphite-plain + containerPort: 2003 + - name: graphite-udp + containerPort: 2003 + protocol: UDP + - name: graphite-pickle + containerPort: 2004 + - name: aggregate-plain + containerPort: 2023 + - name: aggregate-pickl + containerPort: 2024 + - name: statsd + protocol: {{ .Values.statsd.interface }} + containerPort: 8125 + - name: statsd-admin + containerPort: 8126 + env: + - name: "STATSD_INTERFACE" + value: {{ .Values.statsd.interface | lower }} + - name: "GRAPHITE_TIME_ZONE" + value: {{ .Values.timeZone }} + {{- if .Values.env }} + {{- range $key, $value := .Values.env }} + - name: {{ $value.name }} + value: {{ $value.value | quote }} + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: / + port: graphite-gui + readinessProbe: + httpGet: + path: / + port: graphite-gui + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: {{ template "graphite.fullname" . }}-configmap + mountPath: /opt/graphite/conf/ + - name: {{ template "graphite.fullname" . }}-statsd-configmap + subPath: config_tcp.js + mountPath: /opt/statsd/config/tcp.js + - name: {{ template "graphite.fullname" . }}-statsd-configmap + subPath: config_udp.js + mountPath: /opt/statsd/config/udp.js + - name: {{ template "graphite.fullname" . }}-pvc + mountPath: /opt/graphite/storage/ + volumes: + - name: {{ template "graphite.fullname" . }}-configmap + configMap: + name: {{ template "graphite.fullname" . }}-configmap + - name: {{ template "graphite.fullname" . }}-statsd-configmap + configMap: + name: {{ template "graphite.fullname" . }}-statsd-configmap + - name: {{ template "graphite.fullname" . }}-pvc +{{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "graphite.fullname" . }}-pvc{{- end }} +{{- else }} + emptyDir: {} +{{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} diff --git a/charts/graphite/values.yaml b/charts/graphite/values.yaml new file mode 100644 index 0000000..667e71d --- /dev/null +++ b/charts/graphite/values.yaml @@ -0,0 +1,1047 @@ +# Default values for graphite. + +image: + repository: graphiteapp/graphite-statsd + tag: 1.1.7-6 + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 8080 + annotations: {} + labels: {} + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +env: {} +# - name: example-name +# value: example-value + +persistence: + ## Enable storage persistence using Persistent Volume Claims. + ## + enabled: true + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + accessMode: ReadWriteOnce + size: 10Gi + +resources: {} + # limits: + # cpu: 500m + # memory: 1024Mi + # requests: + # cpu: 200m + # memory: 512Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +timeZone: Etc/UTC + +serviceAccount: + accountName: "graphite" + enabled: false + create: false + +rbac: + create: false + +initContainers: [] +# - name: init-sysctl +# image: busybox +# command: +# - sysctl +# - -w +# - net.core.somaxconn=65535 +# securityContext: +# privileged: true + +configMaps: + aggregation-rules.conf: |- + # The form of each line in this file should be as follows: + # + # output_template (frequency) = method input_pattern + # + # This will capture any received metrics that match 'input_pattern' + # for calculating an aggregate metric. The calculation will occur + # every 'frequency' seconds and the 'method' can specify 'sum' or + # 'avg'. The name of the aggregate metric will be derived from + # 'output_template' filling in any captured fields from 'input_pattern'. + # + # For example, if you're metric naming scheme is: + # + # .applications... + # + # You could configure some aggregations like so: + # + # .applications..all.requests (60) = sum .applications..*.requests + # .applications..all.latency (60) = avg .applications..*.latency + # + # As an example, if the following metrics are received: + # + # prod.applications.apache.www01.requests + # prod.applications.apache.www01.requests + # + # They would all go into the same aggregation buffer and after 60 seconds the + # aggregate metric 'prod.applications.apache.all.requests' would be calculated + # by summing their values. + # + # Template components such as will match everything up to the next dot. + # To match metric multiple components including the dots, use <> in the + # input template: + # + # .applications..all. (60) = sum .applications..*.<> + # + # Note that any time this file is modified, it will be re-read automatically. 
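+    #
+    # A worked example, assuming the <env>.applications.<app>.<server>.<metric>
+    # naming scheme described above (the angle-bracket tokens are placeholders,
+    # one per dot-separated path component):
+    #
+    #   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
+    #   <env>.applications.<app>.all.latency  (60) = avg <env>.applications.<app>.*.latency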
+ blacklist.conf: |- + # This file takes a single regular expression per line + # If USE_WHITELIST is set to True in carbon.conf, any metrics received which + # match one of these expressions will be dropped + # This file is reloaded automatically when changes are made + ^some\.noisy\.metric\.prefix\..* + carbon.conf: |- + [cache] + # Configure carbon directories. + # + # OS environment variables can be used to tell carbon where graphite is + # installed, where to read configuration from and where to write data. + # + # GRAPHITE_ROOT - Root directory of the graphite installation. + # Defaults to ../ + # GRAPHITE_CONF_DIR - Configuration directory (where this file lives). + # Defaults to $GRAPHITE_ROOT/conf/ + # GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files. + # Defaults to $GRAPHITE_ROOT/storage/ + # + # To change other directory paths, add settings to this file. The following + # configuration variables are available with these default values: + # + # STORAGE_DIR = $GRAPHITE_STORAGE_DIR + # LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/ + # WHITELISTS_DIR = %(STORAGE_DIR)s/lists/ + # CONF_DIR = %(STORAGE_DIR)s/conf/ + # LOG_DIR = %(STORAGE_DIR)s/log/ + # PID_DIR = %(STORAGE_DIR)s/ + # + # For FHS style directory structures, use: + # + # STORAGE_DIR = /var/lib/carbon/ + # CONF_DIR = /etc/carbon/ + # LOG_DIR = /var/log/carbon/ + # PID_DIR = /var/run/ + # + #LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ + + # Specify the database library used to store metric data on disk. Each database + # may have configurable options to change the behaviour of how it writes to + # persistent storage. + # + # whisper - Fixed-size database, similar in design and purpose to RRD. This is + # the default storage backend for carbon and the most rigorously tested. + # + # ceres - Experimental alternative database that supports storing data in sparse + # files of arbitrary fixed-size resolutions. + DATABASE = whisper + + # Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no + # longer exists (i.e. it is removed or renamed) + ENABLE_LOGROTATION = True + + # Specify the user to drop privileges to + # If this is blank carbon-cache runs as the user that invokes it + # This user must have write access to the local data directory + USER = + + # Limit the size of the cache to avoid swapping or becoming CPU bound. + # Sorts and serving cache queries gets more expensive as the cache grows. + # Use the value "inf" (infinity) for an unlimited cache size. + # value should be an integer number of metric datapoints. + MAX_CACHE_SIZE = inf + + # Limits the number of whisper update_many() calls per second, which effectively + # means the number of write requests sent to the disk. This is intended to + # prevent over-utilizing the disk and thus starving the rest of the system. + # When the rate of required updates exceeds this, then carbon's caching will + # take effect and increase the overall throughput accordingly. + MAX_UPDATES_PER_SECOND = 500 + + # If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a + # stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is + # relatively low and carbon has cached a lot of updates; it enables the carbon + # daemon to shutdown more quickly. + # MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000 + + # Softly limits the number of whisper files that get created each minute. + # Setting this value low (e.g. 
50) is a good way to ensure that your carbon + # system will not be adversely impacted when a bunch of new metrics are + # sent to it. The trade off is that any metrics received in excess of this + # value will be silently dropped, and the whisper file will not be created + # until such point as a subsequent metric is received and fits within the + # defined rate limit. Setting this value high (like "inf" for infinity) will + # cause carbon to create the files quickly but at the risk of increased I/O. + MAX_CREATES_PER_MINUTE = 50 + + # Set the minimum timestamp resolution supported by this instance. This allows + # internal optimisations by overwriting points with equal truncated timestamps + # in order to limit the number of updates to the database. It defaults to one + # second. + MIN_TIMESTAMP_RESOLUTION = 1 + + # Set the minimum lag in seconds for a point to be written to the database + # in order to optimize batching. This means that each point will wait at least + # the duration of this lag before being written. Setting this to 0 disable the feature. + # This currently only works when using the timesorted write strategy. + # MIN_TIMESTAMP_LAG = 0 + + # Set the interface and port for the line (plain text) listener. Setting the + # interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to + # disable this listener if it is not required. + LINE_RECEIVER_INTERFACE = 0.0.0.0 + LINE_RECEIVER_PORT = 2003 + + # Set this to True to enable the UDP listener. By default this is off + # because it is very common to run multiple carbon daemons and managing + # another (rarely used) port for every carbon instance is not fun. + ENABLE_UDP_LISTENER = False + UDP_RECEIVER_INTERFACE = 0.0.0.0 + UDP_RECEIVER_PORT = 2003 + + # Set the interface and port for the pickle listener. Setting the interface to + # 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this + # listener if it is not required. + PICKLE_RECEIVER_INTERFACE = 0.0.0.0 + PICKLE_RECEIVER_PORT = 2004 + + # Set the interface and port for the protobuf listener. Setting the interface to + # 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this + # listener if it is not required. + # PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0 + # PROTOBUF_RECEIVER_PORT = 2005 + + # Limit the number of open connections the receiver can handle as any time. + # Default is no limit. Setting up a limit for sites handling high volume + # traffic may be recommended to avoid running out of TCP memory or having + # thousands of TCP connections reduce the throughput of the service. + #MAX_RECEIVER_CONNECTIONS = inf + + # Per security concerns outlined in Bug #817247 the pickle receiver + # will use a more secure and slightly less efficient unpickler. + # Set this to True to revert to the old-fashioned insecure unpickler. + USE_INSECURE_UNPICKLER = False + + CACHE_QUERY_INTERFACE = 0.0.0.0 + CACHE_QUERY_PORT = 7002 + + # Set this to False to drop datapoints received after the cache + # reaches MAX_CACHE_SIZE. If this is True (the default) then sockets + # over which metrics are received will temporarily stop accepting + # data until the cache size falls below 95% MAX_CACHE_SIZE. + USE_FLOW_CONTROL = True + + # If enabled this setting is used to timeout metric client connection if no + # metrics have been sent in specified time in seconds + #METRIC_CLIENT_IDLE_TIMEOUT = None + + # By default, carbon-cache will log every whisper update and cache hit. 
+ # This can be excessive and degrade performance if logging on the same + # volume as the whisper data is stored. + LOG_UPDATES = False + LOG_CREATES = False + LOG_CACHE_HITS = False + LOG_CACHE_QUEUE_SORTS = False + + # The thread that writes metrics to disk can use one of the following strategies + # determining the order in which metrics are removed from cache and flushed to + # disk. The default option preserves the same behavior as has been historically + # available in version 0.9.10. + # + # sorted - All metrics in the cache will be counted and an ordered list of + # them will be sorted according to the number of datapoints in the cache at the + # moment of the list's creation. Metrics will then be flushed from the cache to + # disk in that order. + # + # timesorted - All metrics in the list will be looked at and sorted according + # to the timestamp of there datapoints. The metric that were the least recently + # written will be written first. This is an hybrid strategy between max and + # sorted which is particularly adapted to sets of metrics with non-uniform + # resolutions. + # + # max - The writer thread will always pop and flush the metric from cache + # that has the most datapoints. This will give a strong flush preference to + # frequently updated metrics and will also reduce random file-io. Infrequently + # updated metrics may only ever be persisted to disk at daemon shutdown if + # there are a large number of metrics which receive very frequent updates OR if + # disk i/o is very slow. + # + # naive - Metrics will be flushed from the cache to disk in an unordered + # fashion. This strategy may be desirable in situations where the storage for + # whisper files is solid state, CPU resources are very limited or deference to + # the OS's i/o scheduler is expected to compensate for the random write + # pattern. + # + CACHE_WRITE_STRATEGY = sorted + + # On some systems it is desirable for whisper to write synchronously. + # Set this option to True if you'd like to try this. Basically it will + # shift the onus of buffering writes from the kernel into carbon's cache. + WHISPER_AUTOFLUSH = False + + # By default new Whisper files are created pre-allocated with the data region + # filled with zeros to prevent fragmentation and speed up contiguous reads and + # writes (which are common). Enabling this option will cause Whisper to create + # the file sparsely instead. Enabling this option may allow a large increase of + # MAX_CREATES_PER_MINUTE but may have longer term performance implications + # depending on the underlying storage configuration. + # WHISPER_SPARSE_CREATE = False + + # Only beneficial on linux filesystems that support the fallocate system call. + # It maintains the benefits of contiguous reads/writes, but with a potentially + # much faster creation speed, by allowing the kernel to handle the block + # allocation and zero-ing. Enabling this option may allow a large increase of + # MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported + # this option will gracefully fallback to standard POSIX file access methods. + WHISPER_FALLOCATE_CREATE = True + + # Enabling this option will cause Whisper to lock each Whisper file it writes + # to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when + # multiple carbon-cache daemons are writing to the same files. 
+ # WHISPER_LOCK_WRITES = False + + # On systems which has a large number of metrics, an amount of Whisper write(2)'s + # pageback sometimes cause disk thrashing due to memory shortage, so that abnormal + # disk reads occur. Enabling this option makes it possible to decrease useless + # page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option. + # WHISPER_FADVISE_RANDOM = False + + # By default all nodes stored in Ceres are cached in memory to improve the + # throughput of reads and writes to underlying slices. Turning this off will + # greatly reduce memory consumption for databases with millions of metrics, at + # the cost of a steep increase in disk i/o, approximately an extra two os.stat + # calls for every read and write. Reasons to do this are if the underlying + # storage can handle stat() with practically zero cost (SSD, NVMe, zRAM). + # Valid values are: + # all - all nodes are cached + # none - node caching is disabled + # CERES_NODE_CACHING_BEHAVIOR = all + + # Ceres nodes can have many slices and caching the right ones can improve + # performance dramatically. Note that there are many trade-offs to tinkering + # with this, and unless you are a ceres developer you *really* should not + # mess with this. Valid values are: + # latest - only the most recent slice is cached + # all - all slices are cached + # none - slice caching is disabled + # CERES_SLICE_CACHING_BEHAVIOR = latest + + # If a Ceres node accumulates too many slices, performance can suffer. + # This can be caused by intermittently reported data. To mitigate + # slice fragmentation there is a tolerance for how much space can be + # wasted within a slice file to avoid creating a new one. That tolerance + # level is determined by MAX_SLICE_GAP, which is the number of consecutive + # null datapoints allowed in a slice file. + # If you set this very low, you will waste less of the *tiny* bit disk space + # that this feature wastes, and you will be prone to performance problems + # caused by slice fragmentation, which can be pretty severe. + # If you set this really high, you will waste a bit more disk space (each + # null datapoint wastes 8 bytes, but keep in mind your filesystem's block + # size). If you suffer slice fragmentation issues, you should increase this or + # run the ceres-maintenance defrag plugin more often. However you should not + # set it to be huge because then if a large but allowed gap occurs it has to + # get filled in, which means instead of a simple 8-byte write to a new file we + # could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice. + # CERES_MAX_SLICE_GAP = 80 + + # Enabling this option will cause Ceres to lock each Ceres file it writes to + # to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when + # multiple carbon-cache daemons are writing to the same files. + # CERES_LOCK_WRITES = False + + # Set this to True to enable whitelisting and blacklisting of metrics in + # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is + # missing or empty, all metrics will pass through + # USE_WHITELIST = False + + # By default, carbon itself will log statistics (such as a count, + # metricsReceived) with the top level prefix of 'carbon' at an interval of 60 + # seconds. 
Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation + # CARBON_METRIC_PREFIX = carbon + CARBON_METRIC_INTERVAL = 10 + + # Enable AMQP if you want to receve metrics using an amqp broker + # ENABLE_AMQP = False + + # Verbose means a line will be logged for every metric received + # useful for testing + # AMQP_VERBOSE = False + + # AMQP_HOST = localhost + # AMQP_PORT = 5672 + # AMQP_VHOST = / + # AMQP_USER = guest + # AMQP_PASSWORD = guest + # AMQP_EXCHANGE = graphite + # AMQP_METRIC_NAME_IN_BODY = False + + # The manhole interface allows you to SSH into the carbon daemon + # and get a python interpreter. BE CAREFUL WITH THIS! If you do + # something like time.sleep() in the interpreter, the whole process + # will sleep! This is *extremely* helpful in debugging, assuming + # you are familiar with the code. If you are not, please don't + # mess with this, you are asking for trouble :) + # + # ENABLE_MANHOLE = False + # MANHOLE_INTERFACE = 127.0.0.1 + # MANHOLE_PORT = 7222 + # MANHOLE_USER = admin + # MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE= + + # Patterns for all of the metrics this machine will store. Read more at + # http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings + # + # Example: store all sales, linux servers, and utilization metrics + # BIND_PATTERNS = sales.#, servers.linux.#, #.utilization + # + # Example: store everything + # BIND_PATTERNS = # + + # URL of graphite-web instance, this is used to add incoming series to the tag database + GRAPHITE_URL = http://127.0.0.1:8080 + + # Tag update interval, this specifies how frequently updates to existing series will trigger + # an update to the tag index, the default setting is once every 100 updates + # TAG_UPDATE_INTERVAL = 100 + + # To configure special settings for the carbon-cache instance 'b', uncomment this: + #[cache:b] + #LINE_RECEIVER_PORT = 2103 + #PICKLE_RECEIVER_PORT = 2104 + #CACHE_QUERY_PORT = 7102 + # and any other settings you want to customize, defaults are inherited + # from the [cache] section. + # You can then specify the --instance=b option to manage this instance + # + # In order to turn off logging of successful connections for the line + # receiver, set this to False + # LOG_LISTENER_CONN_SUCCESS = True + + [relay] + LINE_RECEIVER_INTERFACE = 0.0.0.0 + LINE_RECEIVER_PORT = 2013 + PICKLE_RECEIVER_INTERFACE = 0.0.0.0 + PICKLE_RECEIVER_PORT = 2014 + + # Carbon-relay has several options for metric routing controlled by RELAY_METHOD + # + # Use relay-rules.conf to route metrics to destinations based on pattern rules + #RELAY_METHOD = rules + # + # Use consistent-hashing for even distribution of metrics between destinations + #RELAY_METHOD = consistent-hashing + # + # Use consistent-hashing but take into account an aggregation-rules.conf shared + # by downstream carbon-aggregator daemons. This will ensure that all metrics + # that map to a given aggregation rule are sent to the same carbon-aggregator + # instance. + # Enable this for carbon-relays that send to a group of carbon-aggregators + #RELAY_METHOD = aggregated-consistent-hashing + # + # You can also use fast-hashing and fast-aggregated-hashing which are in O(1) + # and will always redirect the metrics to the same destination but do not try + # to minimize rebalancing when the list of destinations is changing. 
+ RELAY_METHOD = rules + + # If you use consistent-hashing you can add redundancy by replicating every + # datapoint to more than one machine. + REPLICATION_FACTOR = 1 + + # For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas + # across distributed hosts. With this setting disabled, it's possible that replicas + # may be sent to different caches on the same host. This has been the default + # behavior since introduction of 'consistent-hashing' relay method. + # Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing + # your metrics across the cluster nodes using a tool like Carbonate. + #DIVERSE_REPLICAS = True + + # This is a list of carbon daemons we will send any relayed or + # generated metrics to. The default provided would send to a single + # carbon-cache instance on the default port. However if you + # use multiple carbon-cache instances then it would look like this: + # + # DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b + # + # The general form is IP:PORT:INSTANCE where the :INSTANCE part is + # optional and refers to the "None" instance if omitted. + # + # Note that if the destinations are all carbon-caches then this should + # exactly match the webapp's CARBONLINK_HOSTS setting in terms of + # instances listed (order matters!). + # + # If using RELAY_METHOD = rules, all destinations used in relay-rules.conf + # must be defined in this list + DESTINATIONS = 127.0.0.1:2004 + + # This define the protocol to use to contact the destination. It can be + # set to one of "line", "pickle", "udp" and "protobuf". This list can be + # extended with CarbonClientFactory plugins and defaults to "pickle". + # DESTINATION_PROTOCOL = pickle + + # When using consistent hashing it sometime makes sense to make + # the ring dynamic when you don't want to loose points when a + # single destination is down. Replication is an answer to that + # but it can be quite expensive. + # DYNAMIC_ROUTER = False + + # Controls the number of connection attempts before marking a + # destination as down. We usually do one connection attempt per + # second. + # DYNAMIC_ROUTER_MAX_RETRIES = 5 + + # This is the maximum number of datapoints that can be queued up + # for a single destination. Once this limit is hit, we will + # stop accepting new data if USE_FLOW_CONTROL is True, otherwise + # we will drop any subsequently received datapoints. + MAX_QUEUE_SIZE = 10000 + + # This defines the maximum "message size" between carbon daemons. If + # your queue is large, setting this to a lower number will cause the + # relay to forward smaller discrete chunks of stats, which may prevent + # overloading on the receiving side after a disconnect. + MAX_DATAPOINTS_PER_MESSAGE = 500 + + # Limit the number of open connections the receiver can handle as any time. + # Default is no limit. Setting up a limit for sites handling high volume + # traffic may be recommended to avoid running out of TCP memory or having + # thousands of TCP connections reduce the throughput of the service. + #MAX_RECEIVER_CONNECTIONS = inf + + # Specify the user to drop privileges to + # If this is blank carbon-relay runs as the user that invokes it + # USER = + + # This is the percentage that the queue must be empty before it will accept + # more messages. For a larger site, if the queue is very large it makes sense + # to tune this to allow for incoming stats. 
So if you have an average + # flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense + # to allow stats to start flowing when you've cleared the queue to 95% since + # you should have space to accommodate the next minute's worth of stats + # even before the relay incrementally clears more of the queue + QUEUE_LOW_WATERMARK_PCT = 0.8 + + # To allow for batch efficiency from the pickle protocol and to benefit from + # other batching advantages, all writes are deferred by putting them into a queue, + # and then the queue is flushed and sent a small fraction of a second later. + TIME_TO_DEFER_SENDING = 0.0001 + + # Set this to False to drop datapoints when any send queue (sending datapoints + # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the + # default) then sockets over which metrics are received will temporarily stop accepting + # data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE. + USE_FLOW_CONTROL = True + + # If enabled this setting is used to timeout metric client connection if no + # metrics have been sent in specified time in seconds + #METRIC_CLIENT_IDLE_TIMEOUT = None + + # Set this to True to enable whitelisting and blacklisting of metrics in + # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is + # missing or empty, all metrics will pass through + # USE_WHITELIST = False + + # By default, carbon itself will log statistics (such as a count, + # metricsReceived) with the top level prefix of 'carbon' at an interval of 60 + # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation + # CARBON_METRIC_PREFIX = carbon + CARBON_METRIC_INTERVAL = 10 + # + # In order to turn off logging of successful connections for the line + # receiver, set this to False + # LOG_LISTENER_CONN_SUCCESS = True + + # If you're connecting from the relay to a destination that's over the + # internet or similarly iffy connection, a backlog can develop because + # of internet weather conditions, e.g. acks getting lost or similar issues. + # To deal with that, you can enable USE_RATIO_RESET which will let you + # re-set the connection to an individual destination. Defaults to being off. + USE_RATIO_RESET=False + + # When there is a small number of stats flowing, it's not desirable to + # perform any actions based on percentages - it's just too "twitchy". + MIN_RESET_STAT_FLOW=1000 + + # When the ratio of stats being sent in a reporting interval is far + # enough from 1.0, we will disconnect the socket and reconnecto to + # clear out queued stats. The default ratio of 0.9 indicates that 10% + # of stats aren't being delivered within one CARBON_METRIC_INTERVAL + # (default of 60 seconds), which can lead to a queue backup. Under + # some circumstances re-setting the connection can fix this, so + # set this according to your tolerance, and look in the logs for + # "resetConnectionForQualityReasons" to observe whether this is kicking + # in when your sent queue is building up. + MIN_RESET_RATIO=0.9 + + # The minimum time between resets. When a connection is re-set, we + # need to wait before another reset is performed. + # (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed + # before stats for the new connection will be available. Setting this + # below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of + # reset connections for no good reason. 
+ MIN_RESET_INTERVAL=121 + + [aggregator] + LINE_RECEIVER_INTERFACE = 0.0.0.0 + LINE_RECEIVER_PORT = 2023 + + PICKLE_RECEIVER_INTERFACE = 0.0.0.0 + PICKLE_RECEIVER_PORT = 2024 + + # If set true, metric received will be forwarded to DESTINATIONS in addition to + # the output of the aggregation rules. If set false the carbon-aggregator will + # only ever send the output of aggregation. + FORWARD_ALL = True + + # Filenames of the configuration files to use for this instance of aggregator. + # Filenames are relative to CONF_DIR. + # + # AGGREGATION_RULES = aggregation-rules.conf + # REWRITE_RULES = rewrite-rules.conf + + # This is a list of carbon daemons we will send any relayed or + # generated metrics to. The default provided would send to a single + # carbon-cache instance on the default port. However if you + # use multiple carbon-cache instances then it would look like this: + # + # DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b + # + # The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is + # optional and refers to the "None" instance if omitted. + # + # Note that if the destinations are all carbon-caches then this should + # exactly match the webapp's CARBONLINK_HOSTS setting in terms of + # instances listed (order matters!). + DESTINATIONS = 127.0.0.1:2004 + + # If you want to add redundancy to your data by replicating every + # datapoint to more than one machine, increase this. + REPLICATION_FACTOR = 1 + + # This is the maximum number of datapoints that can be queued up + # for a single destination. Once this limit is hit, we will + # stop accepting new data if USE_FLOW_CONTROL is True, otherwise + # we will drop any subsequently received datapoints. + MAX_QUEUE_SIZE = 10000 + + # Set this to False to drop datapoints when any send queue (sending datapoints + # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the + # default) then sockets over which metrics are received will temporarily stop accepting + # data until the send queues fall below 80% MAX_QUEUE_SIZE. + USE_FLOW_CONTROL = True + + # If enabled this setting is used to timeout metric client connection if no + # metrics have been sent in specified time in seconds + #METRIC_CLIENT_IDLE_TIMEOUT = None + + # This defines the maximum "message size" between carbon daemons. + # You shouldn't need to tune this unless you really know what you're doing. + MAX_DATAPOINTS_PER_MESSAGE = 500 + + # This defines how many datapoints the aggregator remembers for + # each metric. Aggregation only happens for datapoints that fall in + # the past MAX_AGGREGATION_INTERVALS * intervalSize seconds. + MAX_AGGREGATION_INTERVALS = 5 + + # Limit the number of open connections the receiver can handle as any time. + # Default is no limit. Setting up a limit for sites handling high volume + # traffic may be recommended to avoid running out of TCP memory or having + # thousands of TCP connections reduce the throughput of the service. + #MAX_RECEIVER_CONNECTIONS = inf + + # By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back + # aggregated data points once every rule.frequency seconds, on a per-rule basis. + # Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points + # every N seconds, independent of rule frequency. This is useful, for example, + # to be able to query partially aggregated metrics from carbon-cache without + # having to first wait rule.frequency seconds. 
+ # WRITE_BACK_FREQUENCY = 0 + + # Set this to True to enable whitelisting and blacklisting of metrics in + # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is + # missing or empty, all metrics will pass through + # USE_WHITELIST = False + + # By default, carbon itself will log statistics (such as a count, + # metricsReceived) with the top level prefix of 'carbon' at an interval of 60 + # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation + # CARBON_METRIC_PREFIX = carbon + CARBON_METRIC_INTERVAL = 10 + + # In order to turn off logging of successful connections for the line + # receiver, set this to False + # LOG_LISTENER_CONN_SUCCESS = True + + # In order to turn off logging of metrics with no corresponding + # aggregation rules receiver, set this to False + # LOG_AGGREGATOR_MISSES = False + + # Specify the user to drop privileges to + # If this is blank carbon-aggregator runs as the user that invokes it + # USER = + + # Part of the code, and particularly aggregator rules, need + # to cache metric names. To avoid leaking too much memory you + # can tweak the size of this cache. The default allow for 1M + # different metrics per rule (~200MiB). + # CACHE_METRIC_NAMES_MAX=1000000 + + # You can optionally set a ttl to this cache. + # CACHE_METRIC_NAMES_TTL=600 + carbon.amqp.conf: |- + # This is a configuration file with AMQP enabled + + [cache] + LOCAL_DATA_DIR = + + # Specify the user to drop privileges to + # If this is blank carbon runs as the user that invokes it + # This user must have write access to the local data directory + USER = + + # Limit the size of the cache to avoid swapping or becoming CPU bound. + # Sorts and serving cache queries gets more expensive as the cache grows. + # Use the value "inf" (infinity) for an unlimited cache size. + MAX_CACHE_SIZE = inf + + # Limits the number of whisper update_many() calls per second, which effectively + # means the number of write requests sent to the disk. This is intended to + # prevent over-utilizing the disk and thus starving the rest of the system. + # When the rate of required updates exceeds this, then carbon's caching will + # take effect and increase the overall throughput accordingly. + MAX_UPDATES_PER_SECOND = 1000 + + # Softly limits the number of whisper files that get created each minute. + # Setting this value low (like at 50) is a good way to ensure your graphite + # system will not be adversely impacted when a bunch of new metrics are + # sent to it. The trade off is that it will take much longer for those metrics' + # database files to all get created and thus longer until the data becomes usable. + # Setting this value high (like "inf" for infinity) will cause graphite to create + # the files quickly but at the risk of slowing I/O down considerably for a while. 
+ MAX_CREATES_PER_MINUTE = inf + + LINE_RECEIVER_INTERFACE = 0.0.0.0 + LINE_RECEIVER_PORT = 2003 + + UDP_RECEIVER_INTERFACE = 0.0.0.0 + UDP_RECEIVER_PORT = 2003 + + PICKLE_RECEIVER_INTERFACE = 0.0.0.0 + PICKLE_RECEIVER_PORT = 2004 + + CACHE_QUERY_INTERFACE = 0.0.0.0 + CACHE_QUERY_PORT = 7002 + + # Enable AMQP if you want to receve metrics using you amqp broker + ENABLE_AMQP = True + + # Verbose means a line will be logged for every metric received + # useful for testing + AMQP_VERBOSE = True + + # your credentials for the amqp server + # AMQP_USER = guest + # AMQP_PASSWORD = guest + + # the network settings for the amqp server + # AMQP_HOST = localhost + # AMQP_PORT = 5672 + + # if you want to include the metric name as part of the message body + # instead of as the routing key, set this to True + # AMQP_METRIC_NAME_IN_BODY = False + + # NOTE: you cannot run both a cache and a relay on the same server + # with the default configuration, you have to specify a distinict + # interfaces and ports for the listeners. + + [relay] + LINE_RECEIVER_INTERFACE = 0.0.0.0 + LINE_RECEIVER_PORT = 2003 + + PICKLE_RECEIVER_INTERFACE = 0.0.0.0 + PICKLE_RECEIVER_PORT = 2004 + + CACHE_SERVERS = server1, server2, server3 + MAX_QUEUE_SIZE = 10000 + dashboard.conf: |- + # This configuration file controls the behavior of the Dashboard UI, available + # at http://my-graphite-server/dashboard/. + # + # This file must contain a [ui] section that defines values for all of the + # following settings. + [ui] + default_graph_width = 400 + default_graph_height = 250 + automatic_variants = true + refresh_interval = 60 + autocomplete_delay = 375 + merge_hover_delay = 750 + + # You can set this 'default', 'white', or a custom theme name. + # To create a custom theme, copy the dashboard-default.css file + # to dashboard-myThemeName.css in the content/css directory and + # modify it to your liking. + theme = default + + [keyboard-shortcuts] + toggle_toolbar = ctrl-z + toggle_metrics_panel = ctrl-space + erase_all_graphs = alt-x + save_dashboard = alt-s + completer_add_metrics = alt-enter + completer_del_metrics = alt-backspace + give_completer_focus = shift-space + + # These settings apply to the UI as a whole, all other sections in this file + # pertain only to specific metric types. + # + # The dashboard presents only metrics that fall into specified naming schemes + # defined in this file. This creates a simpler, more targetted view of the + # data. The general form for defining a naming scheme is as follows: + # + #[Metric Type] + #scheme = basis.path... + #field1.label = Foo + #field2.label = Bar + # + # + # Where each will be displayed as a dropdown box + # in the UI and the remaining portion of the namespace + # shown in the Metric Selector panel. The .label options set the labels + # displayed for each dropdown. + # + # For example: + # + #[Sales] + #scheme = sales... + #channel.label = Channel + #type.label = Product Type + #brand.label = Brand + # + # This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector + # (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc) + # will be available in the Metric Selector (upper-right panel). 
+ graphite.wsgi.example: |- + import sys + sys.path.append('/opt/graphite/webapp') + + from graphite.wsgi import application + graphTemplates.conf: |- + [default] + background = black + foreground = white + majorLine = white + minorLine = grey + lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose + fontName = Sans + fontSize = 10 + fontBold = False + fontItalic = False + + [noc] + background = black + foreground = white + majorLine = white + minorLine = grey + lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose + fontName = Sans + fontSize = 10 + fontBold = False + fontItalic = False + + [plain] + background = white + foreground = black + minorLine = grey + majorLine = rose + + [summary] + background = black + lineColors = #6666ff, #66ff66, #ff6666 + + [alphas] + background = white + foreground = black + majorLine = grey + minorLine = rose + lineColors = 00ff00aa,ff000077,00337799 + relay-rules.conf: |- + # Relay destination rules for carbon-relay. Entries are scanned in order, + # and the first pattern a metric matches will cause processing to cease after sending + # unless `continue` is set to true + # + # [name] + # pattern = + # destinations = + # continue = # default: False + # + # name: Arbitrary unique name to identify the rule + # pattern: Regex pattern to match against the metric name + # destinations: Comma-separated list of destinations. + # ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com + # continue: Continue processing rules if this rule matches (default: False) + + # You must have exactly one section with 'default = true' + # Note that all destinations listed must also exist in carbon.conf + # in the DESTINATIONS setting in the [relay] section + [default] + default = true + destinations = 0.0.0.0:2004 + rewrite-rules.conf: |- + # This file defines regular expression patterns that can be used to + # rewrite metric names in a search & replace fashion. It consists of two + # sections, [pre] and [post]. The rules in the pre section are applied to + # metric names as soon as they are received. The post rules are applied + # after aggregation has taken place. + # + # The general form of each rule is as follows: + # + # regex-pattern = replacement-text + # + # For example: + # + # [post] + # _sum$ = + # _avg$ = + # + # These rules would strip off a suffix of _sum or _avg from any metric names + # after aggregation. + storage-aggregation.conf: |- + # Aggregation methods for whisper files. Entries are scanned in order, + # and first match wins. This file is scanned for changes every 60 seconds + # + # [name] + # pattern = + # xFilesFactor = + # aggregationMethod = + # + # name: Arbitrary unique name for the rule + # pattern: Regex pattern to match against the metric name + # xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur + # aggregationMethod: function to apply to data points for aggregation + # + [min] + pattern = \.lower$ + xFilesFactor = 0.1 + aggregationMethod = min + + [max] + pattern = \.upper(_\d+)?$ + xFilesFactor = 0.1 + aggregationMethod = max + + [sum] + pattern = \.sum$ + xFilesFactor = 0 + aggregationMethod = sum + + [count] + pattern = \.count$ + xFilesFactor = 0 + aggregationMethod = sum + + [count_legacy] + pattern = ^stats_counts.* + xFilesFactor = 0 + aggregationMethod = sum + + [default_average] + pattern = .* + xFilesFactor = 0.3 + aggregationMethod = average + storage-schemas.conf: |- + # Schema definitions for Whisper files. 
Entries are scanned in order, + # and first match wins. This file is scanned for changes every 60 seconds. + # + # Definition Syntax: + # + # [name] + # pattern = regex + # retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ... + # + # Remember: To support accurate aggregation from higher to lower resolution + # archives, the precision of a longer retention archive must be + # cleanly divisible by precision of next lower retention archive. + # + # Valid: 60s:7d,300s:30d (300/60 = 5) + # Invalid: 180s:7d,300s:30d (300/180 = 3.333) + # + + # Carbon's internal metrics. This entry should match what is specified in + # CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings + [carbon] + pattern = ^carbon\. + retentions = 10s:6h,1m:90d + + [default_1min_for_1day] + pattern = .* + retentions = 10s:6h,1m:6d,10m:1800d + whitelist.conf: |- + # This file takes a single regular expression per line + # If USE_WHITELIST is set to True in carbon.conf, only metrics received which + # match one of these expressions will be persisted. If this file is empty or + # missing, all metrics will pass through. + # This file is reloaded automatically when changes are made + .* + +statsdConfigMaps: + config_tcp.js: |- + { + "graphiteHost": "127.0.0.1", + "graphitePort": 2003, + "port": 8125, + "flushInterval": 10000, + "servers": [{ + "server": "./servers/tcp", + "address": "0.0.0.0", + "port": 8125 + }] + } + config_udp.js: |- + { + "graphiteHost": "127.0.0.1", + "graphitePort": 2003, + "port": 8125, + "flushInterval": 10000, + "servers": [{ + "server": "./servers/udp", + "address": "0.0.0.0", + "port": 8125 + }] + } + +statsd: + interface: UDP diff --git a/ct.yaml b/ct.yaml index f5e4bdf..4294a91 100644 --- a/ct.yaml +++ b/ct.yaml @@ -4,7 +4,6 @@ chart-repos: - stable=https://charts.helm.sh/stable - bitnami=https://charts.bitnami.com/bitnami - influxdata=https://helm.influxdata.com/ - - kiwigrid=https://kiwigrid.github.io target-branch: main validate-maintainers: false helm-extra-args: '--timeout 30m'
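
To sanity-check the switch from the kiwigrid repository to the vendored `file://../graphite` dependency, the lock file and templates can be rebuilt locally. A minimal sketch, assuming Helm 3 is installed and the commands run from the repository root:

```bash
# Rebuild charts/flagsmith/Chart.lock against the local file://../graphite dependency
helm dependency update charts/flagsmith

# Render both charts to catch template errors introduced by the move
helm lint charts/graphite
helm template flagsmith charts/flagsmith --set graphite.enabled=true >/dev/null
```

The chart-testing configuration above (`ct.yaml`) covers the same ground in CI once the kiwigrid repo entry is removed.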