From 06aac101c578882a2aac9f0b0b88735ddc9e0cdc Mon Sep 17 00:00:00 2001
From: Jorge Perez
Date: Wed, 13 Mar 2024 10:00:49 -0500
Subject: [PATCH 01/20] Adding documentation for grafana deploy (#133)

* Adding documentation for grafana deploy

Signed-off-by: jorge-perez
---
 docs/grafana.md | 140 ++++++++++++++++++++++++++++++++++++++++++++++++
 mkdocs.yml | 6 +++
 2 files changed, 146 insertions(+)
 create mode 100644 docs/grafana.md

diff --git a/docs/grafana.md b/docs/grafana.md
new file mode 100644
index 00000000..07310958
--- /dev/null
+++ b/docs/grafana.md
@@ -0,0 +1,140 @@
+# Grafana
+
+---
+
+!!! note
+    This deployment makes a few assumptions:
+
+    * assumes you are using OAuth via Azure
+    * assumes you are using TLS/SSL
+    * assumes you are using ingress
+
+    If this does not apply to your deployment, adjust the overrides.yaml file and skip over any unneeded sections here.
+
+## Create secret client file
+
+To avoid putting sensitive information on the CLI, it is recommended to create and use a secret file instead.
+
+Example secret file:
+
+```yaml
+apiVersion: v1
+data:
+  client_id: base64_encoded_client_id
+  client_secret: base64_encoded_client_secret
+kind: Secret
+metadata:
+  name: azure-client
+  namespace: grafana
+type: Opaque
+```
+
+---
+
+## Create a datasources yaml
+
+If you have specific datasources that should be populated when Grafana deploys, create a separate datasources.yaml. The example below shows one way to configure Prometheus and Loki datasources.
+
+Example datasources.yaml file:
+
+```yaml
+datasources:
+  datasources.yaml:
+    apiVersion: 1
+    datasources:
+    - name: prometheus
+      type: prometheus
+      access: proxy
+      url: http://kube-prometheus-stack-prometheus.prometheus.svc.cluster.local:9090
+      isDefault: true
+    - name: loki
+      type: loki
+      access: proxy
+      url: http://loki-gateway.{{ .Release.Namespace }}.svc.cluster.local:80
+      editable: false
+```
+
+---
+
+## Create your SSL files
+
+If you are configuring Grafana to use TLS/SSL, you should create a file for your certificate and a file for your key. After the deployment, these files can be deleted if desired since the cert and key will now be in a Kubernetes secret.
+
+Your cert and key files should look something like the following (cert and key example taken from [VMware Docs](https://docs.vmware.com/en/VMware-NSX-Data-Center-for-vSphere/6.4/com.vmware.nsx.admin.doc/GUID-BBC4804F-AC54-4DD2-BF6B-ECD2F60083F6.html "VMware Docs")).
+
+??? 
example + + === "Cert file" + ``` + -----BEGIN CERTIFICATE----- + MIID0DCCARIGAWIBAGIBATANBGKQHKIG9W0BAQUFADB/MQSWCQYDVQQGEWJGUJET + MBEGA1UECAWKU29TZS1TDGF0ZTEOMAWGA1UEBWWFUGFYAXMXDTALBGNVBAOMBERP + BWKXDTALBGNVBASMBE5TQLUXEDAOBGNVBAMMB0RPBWKGQ0EXGZAZBGKQHKIG9W0B + CQEWDGRPBWLAZGLTAS5MCJAEFW0XNDAXMJGYMDM2NTVAFW0YNDAXMJYYMDM2NTVA + MFSXCZAJBGNVBAYTAKZSMRMWEQYDVQQIDAPTB21LLVN0YXRLMSEWHWYDVQQKDBHJ + BNRLCM5LDCBXAWRNAXRZIFB0ESBMDGQXFDASBGNVBAMMC3D3DY5KAW1PLMZYMIIB + IJANBGKQHKIG9W0BAQEFAAOCAQ8AMIIBCGKCAQEAVPNAPKLIKDVX98KW68LZ8PGA + RRCYERSNGQPJPIFMVJJE8LUCOXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWX + SXITRW99HBFAL1MDQYWCUKOEB9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P + 1NCVW+6B/AAN9L1G2PQXGRDYC/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYB + AKJQETWWV6DFK/GRDOSED/6BW+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAU + ZKCHSRYC/WHVURX6O85D6QPZYWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWID + AQABO3SWETAJBGNVHRMEAJAAMCWGCWCGSAGG+EIBDQQFFH1PCGVUU1NMIEDLBMVY + YXRLZCBDZXJ0AWZPY2F0ZTADBGNVHQ4EFGQU+TUGFTYN+CXE1WXUQEA7X+YS3BGW + HWYDVR0JBBGWFOAUHMWQKBBRGP87HXFVWGPNLGGVR64WDQYJKOZIHVCNAQEFBQAD + GGEBAIEEMQQHEZEXZ4CKHE5UM9VCKZKJ5IV9TFS/A9CCQUEPZPLT7YVMEVBFNOC0 + +1ZYR4TXGI4+5MHGZHYCIVVHO4HKQYM+J+O5MWQINF1QOAHUO7CLD3WNA1SKCVUV + VEPIXC/1AHZRG+DPEEHT0MDFFOW13YDUC2FH6AQEDCEL4AV5PXQ2EYR8HR4ZKBC1 + FBTUQUSVA8NWSIYZQ16FYGVE+ANF6VXVUIZYVWDRPRV/KFVLNA3ZPNLMMXU98MVH + PXY3PKB8++6U4Y3VDK2NI2WYYLILS8YQBM4327IKMKDC2TIMS8U60CT47MKU7ADY + CBTV5RDKRLAYWM5YQLTIGLVCV7O= + -----END CERTIFICATE----- + ``` + + === "Key file" + ``` + -----BEGIN RSA PRIVATE KEY----- + MIIEOWIBAAKCAQEAVPNAPKLIKDVX98KW68LZ8PGARRCYERSNGQPJPIFMVJJE8LUC + OXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWXSXITRW99HBFAL1MDQYWCUKOE + B9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P1NCVW+6B/AAN9L1G2PQXGRDY + C/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYBAKJQETWWV6DFK/GRDOSED/6B + W+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAUZKCHSRYC/WHVURX6O85D6QPZ + YWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWIDAQABAOIBAFML8CD9A5PMQLW3 + F9BTTQZ1SRL4FVP7CMHSXHVJSJEHWHHCKEE0OBKWTRSGKTSM1XLU5W8IITNHN0+1 + INR+78EB+RRGNGDAXH8DIODKEY+8/CEE8TFI3JYUTKDRLXMBWIKSOUVVIUMOQ3FX + OGQYWQ0Z2L/PVCWY/Y82FFQ3YSC5GAJSBBYSCRG14BQO44ULRELE4SDWS5HCJKYB + EI2B8COMUCQZSOTXG9NILN/JE2BO/I2HGSAWIBGCODBMS8K6TVSSRZMR3KJ5O6J+ + 77LGWKH37BRVGBVYVBQ6NWPL0XLG7DUV+7LWEO5QQAPY6AXB/ZBCKQLQU6/EJOVE + YDG5JQECGYEA9KKFTZD/WEVAREA0DZFEJRU8VLNWOAGL7CJAODXQXOS4MCR5MPDT + KBWGFKLFFH/AYUNPBLK6BCJP1XK67B13ETUA3I9Q5T1WUZEOBIKKBLFM9DDQJT43 + UKZWJXBKFGSVFRYPTGZST719MZVCPCT2CZPJEGN3HLPT6FYW3EORNOECGYEAXIOU + JWXCOMUGAB7+OW2TR0PGEZBVVLEGDKAJ6TC/HOKM1A8R2U4HLTEJJCRLLTFW++4I + DDHE2DLER4Q7O58SFLPHWGPMLDEZN7WRLGR7VYFUV7VMAHJGUC3GV9AGNHWDLA2Q + GBG9/R9OVFL0DC7CGJGLEUTITCYC31BGT3YHV0MCGYEA4K3DG4L+RN4PXDPHVK9I + PA1JXAJHEIFEHNAW1D3VWKBSKVJMGVF+9U5VEV+OWRHN1QZPZV4SURI6M/8LK8RA + GR4UNM4AQK4K/QKY4G05LKRIK9EV2CGQSLQDRA7CJQ+JN3NB50QG6HFNFPAFN+J7 + 7JUWLN08WFYV4ATPDD+9XQECGYBXIZKZFL+9IQKFOCONVWAZGO+DQ1N0L3J4ITIK + W56CKWXYJ88D4QB4EUU3YJ4UB4S9MIAW/ELEWKZIBWPUPFAN0DB7I6H3ZMP5ZL8Q + QS3NQCB9DULMU2/TU641ERUKAMIOKA1G9SNDKAZUWO+O6FDKIB1RGOBK9XNN8R4R + PSV+AQKBGB+CICEXR30VYCV5BNZN9EFLIXNKAEMJURYCXCRQNVRNUIUBVAO8+JAE + CDLYGS5RTGOLZIB0IVERQWSP3EI1ACGULTS0VQ9GFLQGAN1SAMS40C9KVNS1MLDU + LHIHYPJ8USCVT5SNWO2N+M+6ANH5TPWDQNEK6ZILH4TRBUZAIHGB + -----END RSA PRIVATE KEY----- + ``` + +--- + +## Add repo and install + +```shell +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +kubectl create ns grafana +kubectl -n grafana create secret tls grafana-tls-public --cert=your_cert_file --key=your_key_file + +kubectl -n grafana create secret generic azure-client --type opaque 
--from-literal=client_id="your_client_id" --from-literal=client_secret="your_client_secret" +or +kubectl -n grafana apply -f azure-secrets.yaml + +helm upgrade --install grafana grafana/grafana --namespace grafana --values overrides.yaml -f datasources.yaml --set tenant_id=your_tenant_id --set custom_host=your_url_for_ingress +``` diff --git a/mkdocs.yml b/mkdocs.yml index 6c385096..ce81a004 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -112,6 +112,9 @@ markdown_extensions: line_spans: __span pygments_lang_class: true - pymdownx.inlinehilite + - pymdownx.details + - pymdownx.tabbed: + alternate_style: true - pymdownx.snippets: restrict_base_path: false @@ -146,6 +149,9 @@ nav: - Ceph External: storage-ceph-rook-external.md - NFS External: storage-nfs-external.md - TopoLVM: storage-topolvm.md + - Monitoring: + - Monitoring Overview: prometheus.md + - Grafana: grafana.md - Secrets: - vault.md - Vault Operator: vault-secrets-operator.md From 58529062289c42d77a799724eea59a4be4602a22 Mon Sep 17 00:00:00 2001 From: Jorge Perez Date: Wed, 13 Mar 2024 11:17:42 -0500 Subject: [PATCH 02/20] Fixing mkdoc.yml - duplicate Monitoring section. (#147) Signed-off-by: jorge-perez --- mkdocs.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index ce81a004..0c30a84a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -149,9 +149,6 @@ nav: - Ceph External: storage-ceph-rook-external.md - NFS External: storage-nfs-external.md - TopoLVM: storage-topolvm.md - - Monitoring: - - Monitoring Overview: prometheus.md - - Grafana: grafana.md - Secrets: - vault.md - Vault Operator: vault-secrets-operator.md @@ -188,6 +185,7 @@ nav: - Monitoring: - Monitoring Overview: prometheus-monitoring-overview.md - Prometheus: prometheus.md + - Grafana: grafana.md - MySQL Exporter: prometheus-mysql-exporter.md - RabbitMQ Exporter: prometheus-rabbitmq-exporter.md - Memcached Exporter: prometheus-memcached-exporter.md From 5bddc8b01298d23942deaba242a52c292ce880d9 Mon Sep 17 00:00:00 2001 From: Tim Olow Date: Thu, 14 Mar 2024 09:27:22 -0500 Subject: [PATCH 03/20] Update to latest stable ceph release (#149) Bump 18.2.1 to 18.2.2 latest stable ceph release --- kustomize/rook-cluster/rook-cluster.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kustomize/rook-cluster/rook-cluster.yaml b/kustomize/rook-cluster/rook-cluster.yaml index 632d052a..c95de39f 100644 --- a/kustomize/rook-cluster/rook-cluster.yaml +++ b/kustomize/rook-cluster/rook-cluster.yaml @@ -21,7 +21,7 @@ spec: # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v17.2.6-20231027 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v18.2.1 + image: quay.io/ceph/ceph:v18.2.2 # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported. # Future versions such as `squid` (v19) would require this to be set to `true`. # Do not set to true in production. 
From ca2c9d5cf7d8f496789d0c149f362d12093c9912 Mon Sep 17 00:00:00 2001 From: Don Norton <5455792+donnorton@users.noreply.github.com> Date: Fri, 15 Mar 2024 13:40:59 -0400 Subject: [PATCH 04/20] Update genestack-getting-started.md (#153) Minor grammatical improvements --- docs/genestack-getting-started.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/genestack-getting-started.md b/docs/genestack-getting-started.md index 70f3ea7b..5b2e80fb 100644 --- a/docs/genestack-getting-started.md +++ b/docs/genestack-getting-started.md @@ -2,11 +2,11 @@ # What is Genestack? -Genestack is a complete operations and deployment ecosystem for Kubernetes and OpenStack. The purpose is of +Genestack is a complete operations and deployment ecosystem for Kubernetes and OpenStack. The purpose of this project is to allow hobbyists, operators, and cloud service providers the ability to build, scale, and leverage Open-Infrastructure in new and exciting ways. -Genestack’s inner workings are a blend dark magic — crafted with [Kustomize](https://kustomize.io) and +Genestack’s inner workings are a blend of dark magic — crafted with [Kustomize](https://kustomize.io) and [Helm](https://helm.sh). It’s like cooking with cloud. Want to spice things up? Tweak the `kustomization.yaml` files or add those extra 'toppings' using Helm's style overrides. However, the platform is ready to go with batteries included. @@ -18,7 +18,7 @@ to manage cloud infrastructure in the way you need it. ## Getting Started -Before you can do anything we need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location. +Before you can do anything, you need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location. !!! info From 984d125a8db5d9d121d1ca19679e9b051bea4fd4 Mon Sep 17 00:00:00 2001 From: "phillip.toohill" Date: Mon, 18 Mar 2024 09:37:52 -0500 Subject: [PATCH 05/20] Monitoring: Adding postgres exporter (#154) --- ...ustomize-prometheus-postgres-exporter.yaml | 33 +++ docs/prometheus-postgres-exporter.md | 18 ++ docs/prometheus-rabbitmq-exporter.md | 3 +- .../postgresql/postgresql-helm-overrides.yaml | 16 +- .../kustomization.yaml | 8 + .../prometheus-postgres-exporter/values.yaml | 259 ++++++++++++++++++ mkdocs.yml | 3 +- 7 files changed, 329 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/kustomize-prometheus-postgres-exporter.yaml create mode 100644 docs/prometheus-postgres-exporter.md create mode 100644 kustomize/prometheus-postgres-exporter/kustomization.yaml create mode 100644 kustomize/prometheus-postgres-exporter/values.yaml diff --git a/.github/workflows/kustomize-prometheus-postgres-exporter.yaml b/.github/workflows/kustomize-prometheus-postgres-exporter.yaml new file mode 100644 index 00000000..bb9d3025 --- /dev/null +++ b/.github/workflows/kustomize-prometheus-postgres-exporter.yaml @@ -0,0 +1,33 @@ +name: Kustomize GitHub Actions for Prometheus PostgresSQL exporter + +on: + pull_request: + paths: + - kustomize/prometheus-postgres-exporter/** + - .github/workflows/kustomize-prometheus-postgres-exporter.yaml +jobs: + kustomize: + name: Kustomize + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Kustomize Install + working-directory: /usr/local/bin/ + run: | + if [ ! 
-f /usr/local/bin/kustomize ]; then + curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | sudo bash + fi + - name: Run Kustomize Build + run: | + kustomize build kustomize/prometheus-postgres-exporter/ --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml + - name: Return Kustomize Build + uses: actions/upload-artifact@v2 + with: + name: kustomize-prometheus-postgres-exporter-artifact + path: /tmp/rendered.yaml diff --git a/docs/prometheus-postgres-exporter.md b/docs/prometheus-postgres-exporter.md new file mode 100644 index 00000000..d765821b --- /dev/null +++ b/docs/prometheus-postgres-exporter.md @@ -0,0 +1,18 @@ +# PostgresSQL Exporter + +PostgresSQL Exporter is used to expose metrics from a running PostgresSQL deployment. + +!!! note + + To deploy metric exporters you will first need to deploy the Prometheus Operator, see: ([Deploy Prometheus](prometheus.md)). + +## Installation + +Install the PostgresSQL Exporter + +``` shell +kubectl kustomize --enable-helm /opt/genestack/kustomize/prometheus-postgres-exporter | kubectl -n openstack apply -f - +``` + +!!! success + If the installation is successful, you should see the exporter pod in the openstack namespace. diff --git a/docs/prometheus-rabbitmq-exporter.md b/docs/prometheus-rabbitmq-exporter.md index bcec5324..1c2479b5 100644 --- a/docs/prometheus-rabbitmq-exporter.md +++ b/docs/prometheus-rabbitmq-exporter.md @@ -11,8 +11,7 @@ RabbitMQ Exporter is used to expose metrics from a running RabbitMQ deployment. Install the RabbitMQ Exporter ``` shell -kubectl kustomize --enable-helm /opt/genestack/kustomize/prometheus-rabbitmq-exporter | \ - kubectl -n openstack apply --server-side -f - +kubectl kustomize --enable-helm /opt/genestack/kustomize/prometheus-rabbitmq-exporter | kubectl -n openstack apply --server-side -f - ``` !!! success diff --git a/helm-configs/postgresql/postgresql-helm-overrides.yaml b/helm-configs/postgresql/postgresql-helm-overrides.yaml index 798aad04..679228c1 100644 --- a/helm-configs/postgresql/postgresql-helm-overrides.yaml +++ b/helm-configs/postgresql/postgresql-helm-overrides.yaml @@ -224,9 +224,9 @@ dependencies: monitoring: prometheus: - enabled: true + enabled: false postgresql_exporter: - scrape: true + scrape: false volume: backup: @@ -478,10 +478,10 @@ manifests: pvc_backup: false monitoring: prometheus: - configmap_bin: true - configmap_etc: true - deployment_exporter: true - job_user_create: true - secret_etc: true - service_exporter: true + configmap_bin: false + configmap_etc: false + deployment_exporter: false + job_user_create: false + secret_etc: false + service_exporter: false ... 
diff --git a/kustomize/prometheus-postgres-exporter/kustomization.yaml b/kustomize/prometheus-postgres-exporter/kustomization.yaml new file mode 100644 index 00000000..4461e2c4 --- /dev/null +++ b/kustomize/prometheus-postgres-exporter/kustomization.yaml @@ -0,0 +1,8 @@ +helmCharts: + - name: prometheus-postgres-exporter + repo: https://prometheus-community.github.io/helm-charts + releaseName: prometheus-postgres-exporter + namespace: openstack + version: 6.0.0 + includeCRDs: true + valuesFile: values.yaml diff --git a/kustomize/prometheus-postgres-exporter/values.yaml b/kustomize/prometheus-postgres-exporter/values.yaml new file mode 100644 index 00000000..01ebe1b9 --- /dev/null +++ b/kustomize/prometheus-postgres-exporter/values.yaml @@ -0,0 +1,259 @@ +replicaCount: 1 + +image: + registry: quay.io + repository: prometheuscommunity/postgres-exporter + # if not set appVersion field from Chart.yaml is used + tag: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +command: [] + +service: + type: ClusterIP + port: 9187 + targetPort: 9187 + name: http + labels: {} + annotations: {} + +automountServiceAccountToken: false + +serviceMonitor: + # When set true then use a ServiceMonitor to configure scraping + enabled: true + # Set the namespace the ServiceMonitor should be deployed + namespace: openstack + # Set how frequently Prometheus should scrape + # interval: 30s + # Set path to cloudwatch-exporter telemtery-path + # telemetryPath: /metrics + # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator + # labels: + # Set timeout for scrape + # timeout: 10s + # Set of labels to transfer from the Kubernetes Service onto the target + # targetLabels: [] + # MetricRelabelConfigs to apply to samples before ingestion + # metricRelabelings: [] + # Set relabel_configs as per https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + # relabelings: [] + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current prometheus-postgres-exporter service. + # - alert: HugeReplicationLag + # expr: pg_replication_lag{service="{{ template "prometheus-postgres-exporter.fullname" . }}"} / 3600 > 1 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: replication for {{ template "prometheus-postgres-exporter.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + +priorityClassName: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m +# memory: 128Mi + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Add annotations to the ServiceAccount, useful for EKS IAM Roles for Service Accounts or Google Workload Identity. + annotations: {} + +# Add a default ingress to allow namespace access to service.targetPort +# Helpful if other NetworkPolicies are configured in the namespace +networkPolicy: + # Specifies whether a NetworkPolicy should be created + enabled: false + # Set labels for the NetworkPolicy + labels: {} + +# The securityContext of the pod. +# See https://kubernetes.io/docs/concepts/policy/security-context/ for more. +podSecurityContext: + runAsGroup: 1001 + runAsUser: 1001 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + +# The securityContext of the container. +# See https://kubernetes.io/docs/concepts/policy/security-context/ for more. +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + +hostAliases: [] + # Set Host Aliases as per https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/ + # - ip: "127.0.0.1" + # hostnames: + # - "foo.local" +# - "bar.local" + +config: + ## The datasource properties on config are passed through helm tpl function. + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + datasource: + # Specify one of both datasource or datasourceSecret + host: postgresql + user: postgres + userSecret: {} + # Secret name + # name: + # User key inside secret + # key: + # Only one of password, passwordFile, passwordSecret and pgpassfile can be specified + password: + # Specify passwordFile if DB password is stored in a file. + # For example, to use with vault-injector from Hashicorp + passwordFile: '' + # Specify passwordSecret if DB password is stored in secret. + passwordSecret: + name: postgresql-db-admin + key: password + # Secret name + # name: + # Password key inside secret + # key: + pgpassfile: '' + # If pgpassfile is set, it is used to initialize the PGPASSFILE environment variable. + # See https://www.postgresql.org/docs/14/libpq-pgpass.html for more info. 
+ port: "5432" + database: '' + sslmode: disable + extraParams: '' + datasourceSecret: {} + # Specifies if datasource should be sourced from secret value in format: postgresql://login:password@hostname:port/dbname?sslmode=disable + # Multiple Postgres databases can be configured by comma separated postgres connection strings + # Secret name + # name: + # Connection string key inside secret + # key: + disableCollectorDatabase: false + disableCollectorBgwriter: false + disableDefaultMetrics: false + disableSettingsMetrics: false + + # possible values debug, info, warn, error, fatal + logLevel: "" + # possible values logfmt, json + logFormat: "" + extraArgs: [] + + # postgres_exporter.yml + postgresExporter: "" + # auth_modules: + # first: + # type: userpass + # userpass: + # username: first + # password: firstpass + # options: + # sslmode: disable + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +annotations: { + prometheus.io/scrape: "true", + prometheus.io/path: "/metrics", + prometheus.io/port: "9187", +} + +podLabels: {} + +# Configurable health checks +livenessProbe: + initialDelaySeconds: 0 + timeoutSeconds: 3 + +readinessProbe: + initialDelaySeconds: 0 + timeoutSeconds: 1 + +# Labels and annotations to attach to the deployment resource +deployment: + labels: {} + annotations: { + prometheus.io/scrape: "true", + prometheus.io/path: "/metrics", + prometheus.io/port: "9187", + } + +# ExtraEnvs +extraEnvs: [] + # - name: EXTRA_ENV + # value: value + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: +# fieldPath: metadata.namespace + +# Init containers, e. g. for secrets creation before the exporter +initContainers: [] + # - name: + # image: + # volumeMounts: + # - name: creds +# mountPath: /creds + +# Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: [] + +# Additional volumes, e. g. for secrets used in an extraContainer +extraVolumes: [] +# Uncomment for mounting custom ca-certificates +# - name: ssl-certs +# secret: +# defaultMode: 420 +# items: +# - key: ca-certificates.crt +# path: ca-certificates.crt +# secretName: ssl-certs + +# Additional volume mounts +extraVolumeMounts: [] +# Uncomment for mounting custom ca-certificates file into container +# - name: ssl-certs +# mountPath: /etc/ssl/certs/ca-certificates.crt +# subPath: ca-certificates.crt + +podDisruptionBudget: + enabled: false + maxUnavailable: 1 diff --git a/mkdocs.yml b/mkdocs.yml index 0c30a84a..d553449c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -189,7 +189,8 @@ nav: - MySQL Exporter: prometheus-mysql-exporter.md - RabbitMQ Exporter: prometheus-rabbitmq-exporter.md - Memcached Exporter: prometheus-memcached-exporter.md - - Postgres Exporter: prometheus-openstack-metrics-exporter.md + - Postgres Exporter: prometheus-postgres-exporter.md + - Openstack Exporter: prometheus-openstack-metrics-exporter.md - Operational Guide: - Running Genestack Upgrade: genestack-upgrade.md - Running Kubespray Upgrade: k8s-kubespray-upgrade.md From 0df2e227097f945e12ef1adbc8956419341c1490 Mon Sep 17 00:00:00 2001 From: Chris Blumentritt Date: Mon, 18 Mar 2024 11:28:59 -0500 Subject: [PATCH 06/20] Update examples with setting to forget unhealthy ingestors (#157) If one of the loki-write pods moves due to a different node the hash ring can become unhealthy. This will cause logs not to be sent to the backend. This will further lead to the other write pods to start filling up the volumes that they use and eventually cause dropped logs. 
Example error ``` ubuntu@overseer01:~$ k -n grafana logs daemonset.apps/loki-logs --tail 2 -f Found 37 pods, using pod/loki-logs-m9xvs ts=2024-03-15T14:42:17.533917001Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.57:9095" ts=2024-03-15T14:44:14.190670342Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.54:9095" ts=2024-03-15T14:47:22.746099384Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.57:9095" ts=2024-03-15T14:47:22.746172806Z caller=client.go:430 level=error component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="final error sending batch" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.57:9095" ts=2024-03-15T14:47:23.806786166Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.54:9095" ts=2024-03-15T14:47:24.644865006Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.57:9095" ts=2024-03-15T14:47:25.886090072Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.54:9095" ts=2024-03-15T14:47:29.833266958Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.54:9095" ts=2024-03-15T14:47:34.541167878Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error 
sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.57:9095" ts=2024-03-15T14:47:44.494616126Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.54:9095" ts=2024-03-15T14:48:08.686557194Z caller=client.go:419 level=warn component=logs logs_config=grafana/loki component=client host=loki-gateway.grafana.svc.cluster.local msg="error sending batch, will retry" status=500 tenant= error="server returned HTTP status 500 Internal Server Error (500): at least 2 live replicas required, could only find 1 - unhealthy instances: 10.233.82.56:9095,10.233.82.54:9095" ``` Signed-off-by: Chris Blumentritt --- helm-configs/loki/loki-helm-minio-overrides-example.yaml | 2 ++ helm-configs/loki/loki-helm-s3-overrides-example.yaml | 2 ++ helm-configs/loki/loki-helm-swift-overrides-example.yaml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/helm-configs/loki/loki-helm-minio-overrides-example.yaml b/helm-configs/loki/loki-helm-minio-overrides-example.yaml index c77ada3a..83d76f72 100644 --- a/helm-configs/loki/loki-helm-minio-overrides-example.yaml +++ b/helm-configs/loki/loki-helm-minio-overrides-example.yaml @@ -5,3 +5,5 @@ minio: loki: auth_enabled: false configStorageType: Secret + ingester: + autoforget_unhealthy: true diff --git a/helm-configs/loki/loki-helm-s3-overrides-example.yaml b/helm-configs/loki/loki-helm-s3-overrides-example.yaml index 09730acf..e9a95086 100644 --- a/helm-configs/loki/loki-helm-s3-overrides-example.yaml +++ b/helm-configs/loki/loki-helm-s3-overrides-example.yaml @@ -5,6 +5,8 @@ minio: loki: auth_enabled: false configStorageType: Secret + ingester: + autoforget_unhealthy: true storage: bucketNames: chunks: < CHUNKS BUCKET NAME > # TODO: Update with relevant bucket name for chunks diff --git a/helm-configs/loki/loki-helm-swift-overrides-example.yaml b/helm-configs/loki/loki-helm-swift-overrides-example.yaml index 5e4155be..a28ae500 100644 --- a/helm-configs/loki/loki-helm-swift-overrides-example.yaml +++ b/helm-configs/loki/loki-helm-swift-overrides-example.yaml @@ -5,6 +5,8 @@ minio: loki: auth_enabled: false configStorageType: Secret + ingester: + autoforget_unhealthy: true storage: bucketNames: chunks: chunks From 7bd34470aefdcb7caff91233b94ea1ad859c4fa5 Mon Sep 17 00:00:00 2001 From: phillip-toohill Date: Mon, 18 Mar 2024 20:07:07 -0500 Subject: [PATCH 07/20] Updating prometheus with persistent volume claims --- kustomize/prometheus/values.yaml | 37 ++++++++++++++------------------ 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/kustomize/prometheus/values.yaml b/kustomize/prometheus/values.yaml index b4ae552f..8f579718 100644 --- a/kustomize/prometheus/values.yaml +++ b/kustomize/prometheus/values.yaml @@ -742,16 +742,14 @@ alertmanager: ## Storage is the definition of how storage will be used by the Alertmanager instances. 
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md ## - storage: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - + storage: + volumeClaimTemplate: + spec: + storageClassName: general + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 15Gi ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false ## @@ -3572,17 +3570,14 @@ prometheus: ## Prometheus StorageSpec for persistent data ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md ## - storageSpec: {} - ## Using PersistentVolumeClaim - ## - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: general + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 15Gi ## Using tmpfs volume ## From 34a3356fe75ff1f4b02b7b7f2637119cf0ff2170 Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Mon, 18 Mar 2024 22:45:07 -0500 Subject: [PATCH 08/20] Fix: Upgrade the mariadb operator to 0.0.27 Signed-off-by: Kevin Carter --- kustomize/mariadb-cluster/base/mariadb-galera.yaml | 4 ++-- kustomize/mariadb-operator/kustomization.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kustomize/mariadb-cluster/base/mariadb-galera.yaml b/kustomize/mariadb-cluster/base/mariadb-galera.yaml index 55ea560d..6fc77a9e 100644 --- a/kustomize/mariadb-cluster/base/mariadb-galera.yaml +++ b/kustomize/mariadb-cluster/base/mariadb-galera.yaml @@ -54,7 +54,7 @@ spec: galeraLibPath: /usr/lib/galera/libgalera_smm.so replicaThreads: 1 agent: - image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.26 + image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27 port: 5555 kubernetesAuth: enabled: true @@ -67,7 +67,7 @@ spec: podRecoveryTimeout: 3m podSyncTimeout: 3m initContainer: - image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.26 + image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27 config: reuseStorageVolume: false volumeClaimTemplate: diff --git a/kustomize/mariadb-operator/kustomization.yaml b/kustomize/mariadb-operator/kustomization.yaml index 737d7986..4f78600f 100644 --- a/kustomize/mariadb-operator/kustomization.yaml +++ b/kustomize/mariadb-operator/kustomization.yaml @@ -14,5 +14,5 @@ helmCharts: metrics: enabled: true includeCRDs: true - version: 0.26.0 + version: 0.27.0 namespace: mariadb-system From 1fc182cdae268ac64a6025ba024d46085717faf3 Mon Sep 17 00:00:00 2001 From: Luke Repko Date: Tue, 19 Mar 2024 08:50:17 -0500 Subject: [PATCH 09/20] feat: introduce ceilometer helm chart overrides (#128) * feat: introduce ceilometer helm chart overrides This begins to add the overrides for the Ceilometer helm chart. Ceilometer provides metering, monitoring, and alarming capabilities in Openstack for billing, performance, optimization, and capacity planning purposes. * fix: ceilometer-api image is deprecated Trying to pull the wallaby image for ceilometer-api results in a 404 not found now. ceilometer-api is in the base image as of commit cd67930 per the upstream kolla repo, so pull that instead. 
* fix: remove ceilometer-collector from config

ceilometer collector was removed from the ceilometer code base[1]

[1] https://review.openstack.org/504244

* fix: ceilometer-api is fully deprecated so rm it

This disables deployment of the api pod and removes the related API
configuration, as ceilometer no longer has a REST API. It is simply a
worker service at this point. The Gnocchi API is preferred over ceilometer.

* fix: set database keys to fake values

The database section is not used, but the base chart still tries to set
some sane default values, so to avoid confusion, just override those with
a string value that makes it obvious this section is not used. The
recommended storage location for meters and events is Gnocchi, which is
automatically discovered and used by means of keystone.

* fix: set gnocchi as the publisher

This was explicitly set to notify:// without any context as to what that
is or does. The configuration does not list that as a valid value, so
let's replace the publisher with the default, `gnocchi`.

* fix: disable the ks-endpoint job

There is no endpoint for ceilometer anymore, so remove the related job
that makes a service in keystone for one.

* fix: bump ceilometer images to yoga

This was the newest tagged image that I could find for Ceilometer. We
will need to investigate building our own Ceilometer images for a later
release of Genestack.

* fix: enable db-sync to init gnocchi resource types

The helm chart has a db_sync job which executes ceilometer-upgrade,
which runs the storage upgrade function that initializes the resource
types in Gnocchi with their attributes.

* fix: add updated event definitions from yoga

The event definitions defined in the helm chart were very dated; update
them to match those found in the yoga release.

* fix: update gnocchi resources to yoga

The gnocchi resources were outdated. This updates them to match what was
released with Yoga.

* fix: update ceilometer meters to yoga

The existing meters were outdated. This brings them up to date with the
yoga release.

* fix: simplify pipeline sinks for now

This removes some complexity that the original helm chart introduced,
which defines custom meter sinks relating to instance cpu, disk, and net
metrics. We may find ourselves disabling pollsters for individual
instances, so let's not inundate the pipeline with unnecessary complexity
yet. If we find they are useful or needed, we can re-enable them after
verifying their proper operation. The polled metrics will still be stored
in Gnocchi, just not transformed according to the defined sinks. If
re-introduced, these pipeline sinks may need to be further tweaked to
work with the updated event defs.

* fix: enable postgresql backup jobs

* fix: add gnocchi API replicas & enable daemonsets

This should make Gnocchi more reliable and have better overall perf.

* fix: disable resource limits for ceilometer

We don't enforce pod resource limits in other helm charts, so set this to
false as the default.

* fix: remove apache2 config for ceilometer

Ceilometer no longer has a REST API, so let's remove this section from
the overrides.

* fix: Add default loglevels to aid troubleshooting

When troubleshooting, it helps to raise or lower the default log levels
of specific modules; setting requests-related loggers to DEBUG, for
example, can help one diagnose ceilometer CRUD operations. 
* doc: add openstack ceilometer installation * fix: set postgresql cron backup to 0015 once a day The default was midnight but a lot of jobs run then; kick this off a little later to help avoid the thundering herd affect. --- docs/openstack-ceilometer.md | 80 + .../ceilometer/ceilometer-helm-overrides.yaml | 2182 +++++++++++++++++ .../gnocchi/gnocchi-helm-overrides.yaml | 6 +- .../postgresql/postgresql-helm-overrides.yaml | 12 +- mkdocs.yml | 1 + 5 files changed, 2272 insertions(+), 9 deletions(-) create mode 100644 docs/openstack-ceilometer.md create mode 100644 helm-configs/ceilometer/ceilometer-helm-overrides.yaml diff --git a/docs/openstack-ceilometer.md b/docs/openstack-ceilometer.md new file mode 100644 index 00000000..d43f1879 --- /dev/null +++ b/docs/openstack-ceilometer.md @@ -0,0 +1,80 @@ +# Deploy Ceilometer + +## Create Secrets + +```shell +kubectl --namespace openstack create secret generic ceilometer-keystone-admin-password \ + --type Opaque \ + --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" +kubectl --namespace openstack create secret generic ceilometer-keystone-test-password \ + --type Opaque \ + --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" +kubectl --namespace openstack create secret generic ceilometer-rabbitmq-password \ + --type Opaque \ + --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" +``` + +## Run the package deployment + +```shell +cd /opt/genestack/submodules/openstack-helm +helm upgrade --install ceilometer ./ceilometer \ + --namespace=openstack \ + --wait \ + --timeout 10m \ + -f /opt/genestack/helm-configs/ceilometer/ceilometer-helm-overrides.yaml \ + --set endpoints.identity.auth.admin.password="$(kubectl --namespace openstack get secret keystone-admin -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.identity.auth.ceilometer.password="$(kubectl --namespace openstack get secret ceilometer-keystone-admin-password -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.identity.auth.test.password="$(kubectl --namespace openstack get secret ceilometer-keystone-test-password -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.oslo_messaging.auth.admin.username="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.username}' | base64 -d)" \ + --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.oslo_messaging.auth.ceilometer.password="$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \ + --set conf.ceilometer.oslo_messaging_notifications.transport_url="\ +rabbit://ceilometer:$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/ceilometer"\ + --set conf.ceilometer.notification.messaging_urls.values="{\ +rabbit://ceilometer:$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/ceilometer,\ +rabbit://cinder:$(kubectl --namespace openstack get secret cinder-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/cinder,\ +rabbit://glance:$(kubectl --namespace openstack get secret glance-rabbitmq-password -o jsonpath='{.data.password}' | 
base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/glance,\ +rabbit://heat:$(kubectl --namespace openstack get secret heat-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/heat,\ +rabbit://keystone:$(kubectl --namespace openstack get secret keystone-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/keystone,\ +rabbit://neutron:$(kubectl --namespace openstack get secret neutron-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/neutron,\ +rabbit://nova:$(kubectl --namespace openstack get secret nova-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/nova}" +``` + +!!! tip + + In a production like environment you may need to include production specific files like the example variable file found in `helm-configs/prod-example-openstack-overrides.yaml`. + +## Verify Ceilometer Workers + +As there is no Ceilometer API, we will do a quick validation against the +Gnocchi API via a series of `openstack metric` commands to confirm that +Ceilometer workers are ingesting metric and event data then persisting them +storage. + +### Verify metric resource types exist + +The Ceilomter db-sync job will create the various resource types in Gnocchi. +Without them, metrics can't be stored, so let's verify they exist. The +output should include named resource types and some attributes for resources +like `instance`, `instance_disk`, `network`, `volume`, etc. + +```shell +kubectl exec -it openstack-admin-client -n openstack -- openstack metric resource-type list +``` + +### Verify metric resources + +Confirm that resources are populating in Gnocchi + +```shell +kubectl exec -it openstack-admin-client -n openstack -- openstack metric resource list +``` + +### Verify metrics + +Confirm that metrics can be retrieved from Gnocchi + +```shell +kubectl exec -it openstack-admin-client -n openstack -- openstack metric list +``` diff --git a/helm-configs/ceilometer/ceilometer-helm-overrides.yaml b/helm-configs/ceilometer/ceilometer-helm-overrides.yaml new file mode 100644 index 00000000..952324d7 --- /dev/null +++ b/helm-configs/ceilometer/ceilometer-helm-overrides.yaml @@ -0,0 +1,2182 @@ +--- +release_group: null + +labels: + compute: + node_selector_key: openstack-compute-node + node_selector_value: enabled + central: + node_selector_key: openstack-control-plane + node_selector_value: enabled + ipmi: + node_selector_key: openstack-node + node_selector_value: enabled + notification: + node_selector_key: openstack-control-plane + node_selector_value: enabled + job: + node_selector_key: openstack-control-plane + node_selector_value: enabled + test: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + tags: + test: docker.io/xrally/xrally-openstack:2.0.0 + ceilometer_db_sync: docker.io/kolla/ubuntu-source-ceilometer-base:yoga + rabbit_init: docker.io/rabbitmq:3.7-management + ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal + ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal + ceilometer_central: docker.io/kolla/ubuntu-source-ceilometer-central:yoga + ceilometer_compute: docker.io/kolla/ubuntu-source-ceilometer-compute:yoga + ceilometer_ipmi: docker.io/kolla/ubuntu-source-ceilometer-base:yoga + ceilometer_notification: docker.io/kolla/ubuntu-source-ceilometer-notification:yoga + dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 + 
image_repo_sync: docker.io/docker:17.07.0 + pull_policy: "IfNotPresent" + local_registry: + active: false + exclude: + - dep_check + - image_repo_sync + +ipmi_device: /dev/ipmi0 + +conf: + ceilometer: + DEFAULT: + debug: "false" +# default_log_levels: >- +# amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO, +# oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=DEBUG, +# urllib3.connectionpool=DEBUG,websocket=WARN,requests.packages.urllib3.util.retry=DEBUG, +# urllib3.util.retry=DEBUG,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN, +# taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO + event_dispatchers: + type: multistring + values: + - gnocchi + meter_dispatchers: + type: multistring + values: + - gnocchi + api: + aodh_is_enabled: "False" + aodh_url: "NotUsed" + dispatcher_gnocchi: + filter_service_activity: False + archive_policy: low + resources_definition_file: /etc/ceilometer/gnocchi_resources.yaml + database: + connection: "NotUsed" + event_connection: "NotUsed" + metering_connection: "NotUsed" + max_retries: -1 + dispatcher: + archive_policy: low + filter_project: service + keystone_authtoken: + auth_type: password + auth_version: v3 + service_credentials: + auth_type: password + interface: internal + notification: + messaging_urls: + type: multistring + values: + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/ceilometer + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/cinder + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/glance + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/nova + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/keystone + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/neutron + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/heat + oslo_messaging_notifications: + driver: messagingv2 + topics: + - notifications + - profiler + oslo_policy: + policy_file: /etc/ceilometer/policy.yaml + cache: + enabled: true + backend: dogpile.cache.memcached + expiration_time: 86400 + event_definitions: + - event_type: 'compute.instance.*' + traits: &instance_traits + tenant_id: + fields: payload.tenant_id + user_id: + fields: payload.user_id + instance_id: + fields: payload.instance_id + display_name: + fields: payload.display_name + resource_id: + fields: payload.instance_id + cell_name: + fields: payload.cell_name + host: + fields: publisher_id.`split(., 1, 1)` + service: + fields: publisher_id.`split(., 0, -1)` + memory_mb: + type: int + fields: payload.memory_mb + disk_gb: + type: int + fields: payload.disk_gb + root_gb: + type: int + fields: payload.root_gb + ephemeral_gb: + type: int + fields: payload.ephemeral_gb + vcpus: + type: int + fields: payload.vcpus + instance_type_id: + fields: payload.instance_type_id + instance_type: + fields: payload.instance_type + state: + fields: payload.state + os_architecture: + fields: payload.image_meta.'org.openstack__1__architecture' + os_version: + fields: payload.image_meta.'org.openstack__1__os_version' + os_distro: + fields: payload.image_meta.'org.openstack__1__os_distro' + launched_at: + type: datetime + fields: payload.launched_at + deleted_at: + type: datetime + fields: payload.deleted_at + - event_type: compute.instance.create.end + traits: + <<: *instance_traits + availability_zone: + fields: payload.availability_zone + - event_type: 
compute.instance.update + traits: + <<: *instance_traits + old_state: + fields: payload.old_state + - event_type: compute.instance.exists + traits: + <<: *instance_traits + audit_period_beginning: + type: datetime + fields: payload.audit_period_beginning + audit_period_ending: + type: datetime + fields: payload.audit_period_ending + - event_type: ['volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*', 'volume.transfer.accept.end', 'snapshot.transfer.accept.end'] + traits: &cinder_traits + user_id: + fields: payload.user_id + project_id: + fields: payload.tenant_id + availability_zone: + fields: payload.availability_zone + display_name: + fields: payload.display_name + replication_status: + fields: payload.replication_status + status: + fields: payload.status + created_at: + type: datetime + fields: payload.created_at + image_id: + fields: payload.glance_metadata[?key=image_id].value + instance_id: + fields: payload.volume_attachment[0].server_id + - event_type: ['volume.transfer.*', 'volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.transfer.accept.end'] + traits: + <<: *cinder_traits + resource_id: + fields: payload.volume_id + host: + fields: payload.host + size: + type: int + fields: payload.size + type: + fields: payload.volume_type + replication_status: + fields: payload.replication_status + - event_type: ['snapshot.transfer.accept.end'] + traits: + <<: *cinder_traits + resource_id: + fields: payload.snapshot_id + project_id: + fields: payload.tenant_id + - event_type: ['share.create.*', 'share.delete.*', 'share.extend.*', 'share.shrink.*'] + traits: &share_traits + share_id: + fields: payload.share_id + user_id: + fields: payload.user_id + project_id: + fields: payload.tenant_id + snapshot_id: + fields: payload.snapshot_id + availability_zone: + fields: payload.availability_zone + status: + fields: payload.status + created_at: + type: datetime + fields: payload.created_at + share_group_id: + fields: payload.share_group_id + size: + type: int + fields: payload.size + name: + fields: payload.name + proto: + fields: payload.proto + is_public: + fields: payload.is_public + description: + fields: payload.description + host: + fields: payload.host + - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] + traits: + <<: *cinder_traits + resource_id: + fields: payload.snapshot_id + volume_id: + fields: payload.volume_id + - event_type: ['image_volume_cache.*'] + traits: + image_id: + fields: payload.image_id + host: + fields: payload.host + - event_type: ['image.create', 'image.update', 'image.upload', 'image.delete'] + traits: &glance_crud + project_id: + fields: payload.owner + resource_id: + fields: payload.id + name: + fields: payload.name + status: + fields: payload.status + created_at: + type: datetime + fields: payload.created_at + user_id: + fields: payload.owner + deleted_at: + type: datetime + fields: payload.deleted_at + size: + type: int + fields: payload.size + - event_type: image.send + traits: &glance_send + receiver_project: + fields: payload.receiver_tenant_id + receiver_user: + fields: payload.receiver_user_id + user_id: + fields: payload.owner_id + image_id: + fields: payload.image_id + destination_ip: + fields: payload.destination_ip + bytes_sent: + 
type: int + fields: payload.bytes_sent + - event_type: orchestration.stack.* + traits: &orchestration_crud + project_id: + fields: payload.tenant_id + user_id: + fields: ['ctxt.trustor_user_id', 'ctxt.user_id'] + resource_id: + fields: payload.stack_identity + name: + fields: payload.name + - event_type: sahara.cluster.* + traits: &sahara_crud + project_id: + fields: payload.project_id + user_id: + fields: ctxt.user_id + resource_id: + fields: payload.cluster_id + name: + fields: payload.name + - event_type: sahara.cluster.health + traits: &sahara_health + <<: *sahara_crud + verification_id: + fields: payload.verification_id + health_check_status: + fields: payload.health_check_status + health_check_name: + fields: payload.health_check_name + health_check_description: + fields: payload.health_check_description + created_at: + type: datetime + fields: payload.created_at + updated_at: + type: datetime + fields: payload.updated_at + - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', + 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] + traits: &identity_crud + resource_id: + fields: payload.resource_info + initiator_id: + fields: payload.initiator.id + project_id: + fields: payload.initiator.project_id + domain_id: + fields: payload.initiator.domain_id + - event_type: identity.role_assignment.* + traits: &identity_role_assignment + role: + fields: payload.role + group: + fields: payload.group + domain: + fields: payload.domain + user: + fields: payload.user + project: + fields: payload.project + - event_type: identity.authenticate + traits: &identity_authenticate + typeURI: + fields: payload.typeURI + id: + fields: payload.id + action: + fields: payload.action + eventType: + fields: payload.eventType + eventTime: + type: datetime + fields: payload.eventTime + outcome: + fields: payload.outcome + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_id: + fields: payload.initiator.id + initiator_name: + fields: payload.initiator.name + initiator_host_agent: + fields: payload.initiator.host.agent + initiator_host_addr: + fields: payload.initiator.host.address + target_typeURI: + fields: payload.target.typeURI + target_id: + fields: payload.target.id + observer_typeURI: + fields: payload.observer.typeURI + observer_id: + fields: payload.observer.id + - event_type: objectstore.http.request + traits: &objectstore_request + typeURI: + fields: payload.typeURI + id: + fields: payload.id + action: + fields: payload.action + eventType: + fields: payload.eventType + eventTime: + type: datetime + fields: payload.eventTime + outcome: + fields: payload.outcome + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_id: + fields: payload.initiator.id + initiator_project_id: + fields: payload.initiator.project_id + target_typeURI: + fields: payload.target.typeURI + target_id: + fields: payload.target.id + target_action: + fields: payload.target.action + target_metadata_path: + fields: payload.target.metadata.path + target_metadata_version: + fields: payload.target.metadata.version + target_metadata_container: + fields: payload.target.metadata.container + target_metadata_object: + fields: payload.target.metadata.object + observer_id: + fields: payload.observer.id + - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 
'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] + traits: &network_traits + user_id: + fields: ctxt.user_id + project_id: + fields: ctxt.tenant_id + - event_type: network.* + traits: + <<: *network_traits + name: + fields: payload.network.name + resource_id: + fields: ['payload.network.id', 'payload.id'] + - event_type: subnet.* + traits: + <<: *network_traits + name: + fields: payload.subnet.name + resource_id: + fields: ['payload.subnet.id', 'payload.id'] + - event_type: port.* + traits: + <<: *network_traits + name: + fields: payload.port.name + resource_id: + fields: ['payload.port.id', 'payload.id'] + - event_type: router.* + traits: + <<: *network_traits + name: + fields: payload.router.name + resource_id: + fields: ['payload.router.id', 'payload.id'] + - event_type: floatingip.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.floatingip.id', 'payload.id'] + - event_type: pool.* + traits: + <<: *network_traits + name: + fields: payload.pool.name + resource_id: + fields: ['payload.pool.id', 'payload.id'] + - event_type: vip.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.vip.id', 'payload.id'] + - event_type: member.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.member.id', 'payload.id'] + - event_type: health_monitor.* + traits: + <<: *network_traits + name: + fields: payload.health_monitor.name + resource_id: + fields: ['payload.health_monitor.id', 'payload.id'] + - event_type: healthmonitor.* + traits: + <<: *network_traits + name: + fields: payload.healthmonitor.name + resource_id: + fields: ['payload.healthmonitor.id', 'payload.id'] + - event_type: listener.* + traits: + <<: *network_traits + name: + fields: payload.listener.name + resource_id: + fields: ['payload.listener.id', 'payload.id'] + - event_type: loadbalancer.* + traits: + <<: *network_traits + name: + fields: payload.loadbalancer.name + resource_id: + fields: ['payload.loadbalancer.id', 'payload.id'] + - event_type: firewall.* + traits: + <<: *network_traits + name: + fields: payload.firewall.name + resource_id: + fields: ['payload.firewall.id', 'payload.id'] + - event_type: firewall_policy.* + traits: + <<: *network_traits + name: + fields: payload.firewall_policy.name + resource_id: + fields: ['payload.firewall_policy.id', 'payload.id'] + - event_type: firewall_rule.* + traits: + <<: *network_traits + name: + fields: payload.firewall_rule.name + resource_id: + fields: ['payload.firewall_rule.id', 'payload.id'] + - event_type: vpnservice.* + traits: + <<: *network_traits + name: + fields: payload.vpnservice.name + resource_id: + fields: ['payload.vpnservice.id', 'payload.id'] + - event_type: ipsecpolicy.* + traits: + <<: *network_traits + name: + fields: payload.ipsecpolicy.name + resource_id: + fields: ['payload.ipsecpolicy.id', 'payload.id'] + - event_type: ikepolicy.* + traits: + <<: *network_traits + name: + fields: payload.ikepolicy.name + resource_id: + fields: ['payload.ikepolicy.id', 'payload.id'] + - event_type: ipsec_site_connection.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.ipsec_site_connection.id', 'payload.id'] + - event_type: '*http.*' + traits: &http_audit + project_id: + fields: payload.initiator.project_id + user_id: + fields: payload.initiator.id + typeURI: + fields: payload.typeURI + eventType: + fields: payload.eventType + action: + fields: payload.action + outcome: + fields: payload.outcome + id: + fields: payload.id + eventTime: + type: datetime + fields: 
payload.eventTime + requestPath: + fields: payload.requestPath + observer_id: + fields: payload.observer.id + target_id: + fields: payload.target.id + target_typeURI: + fields: payload.target.typeURI + target_name: + fields: payload.target.name + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_id: + fields: payload.initiator.id + initiator_name: + fields: payload.initiator.name + initiator_host_address: + fields: payload.initiator.host.address + - event_type: '*http.response' + traits: + <<: *http_audit + reason_code: + fields: payload.reason.reasonCode + - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] + traits: &dns_domain_traits + status: + fields: payload.status + retry: + fields: payload.retry + description: + fields: payload.description + expire: + fields: payload.expire + email: + fields: payload.email + ttl: + fields: payload.ttl + action: + fields: payload.action + name: + fields: payload.name + resource_id: + fields: payload.id + created_at: + type: datetime + fields: payload.created_at + updated_at: + type: datetime + fields: payload.updated_at + version: + fields: payload.version + parent_domain_id: + fields: parent_domain_id + serial: + fields: payload.serial + - event_type: dns.domain.exists + traits: + <<: *dns_domain_traits + audit_period_beginning: + type: datetime + fields: payload.audit_period_beginning + audit_period_ending: + type: datetime + fields: payload.audit_period_ending + - event_type: trove.* + traits: &trove_base_traits + instance_type: + fields: payload.instance_type + user_id: + fields: payload.user_id + resource_id: + fields: payload.instance_id + instance_type_id: + fields: payload.instance_type_id + launched_at: + type: datetime + fields: payload.launched_at + instance_name: + fields: payload.instance_name + state: + fields: payload.state + nova_instance_id: + fields: payload.nova_instance_id + service_id: + fields: payload.service_id + created_at: + type: datetime + fields: payload.created_at + region: + fields: payload.region + - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] + traits: &trove_common_traits + name: + fields: payload.name + availability_zone: + fields: payload.availability_zone + instance_size: + type: int + fields: payload.instance_size + volume_size: + type: int + fields: payload.volume_size + nova_volume_id: + fields: payload.nova_volume_id + - event_type: trove.instance.create + traits: + <<: [*trove_base_traits, *trove_common_traits] + - event_type: trove.instance.modify_volume + traits: + <<: [*trove_base_traits, *trove_common_traits] + old_volume_size: + type: int + fields: payload.old_volume_size + modify_at: + type: datetime + fields: payload.modify_at + - event_type: trove.instance.modify_flavor + traits: + <<: [*trove_base_traits, *trove_common_traits] + old_instance_size: + type: int + fields: payload.old_instance_size + modify_at: + type: datetime + fields: payload.modify_at + - event_type: trove.instance.delete + traits: + <<: [*trove_base_traits, *trove_common_traits] + deleted_at: + type: datetime + fields: payload.deleted_at + - event_type: trove.instance.exists + traits: + <<: *trove_base_traits + display_name: + fields: payload.display_name + audit_period_beginning: + type: datetime + fields: payload.audit_period_beginning + audit_period_ending: + type: datetime + fields: payload.audit_period_ending + - event_type: profiler.* + traits: + project: + fields: payload.project + service: + fields: 
payload.service + name: + fields: payload.name + base_id: + fields: payload.base_id + trace_id: + fields: payload.trace_id + parent_id: + fields: payload.parent_id + timestamp: + type: datetime + fields: payload.timestamp + host: + fields: payload.info.host + path: + fields: payload.info.request.path + query: + fields: payload.info.request.query + method: + fields: payload.info.request.method + scheme: + fields: payload.info.request.scheme + db.statement: + fields: payload.info.db.statement + db.params: + fields: payload.info.db.params + - event_type: 'magnum.cluster.*' + traits: &magnum_cluster_crud + id: + fields: payload.id + typeURI: + fields: payload.typeURI + eventType: + fields: payload.eventType + eventTime: + type: datetime + fields: payload.eventTime + action: + fields: payload.action + outcome: + fields: payload.outcome + initiator_id: + fields: payload.initiator.id + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_name: + fields: payload.initiator.name + initiator_host_agent: + fields: payload.initiator.host.agent + initiator_host_address: + fields: payload.initiator.host.address + target_id: + fields: payload.target.id + target_typeURI: + fields: payload.target.typeURI + observer_id: + fields: payload.observer.id + observer_typeURI: + fields: payload.observer.typeURI + - event_type: 'alarm.*' + traits: + id: + fields: payload.alarm_id + user_id: + fields: payload.user_id + project_id: + fields: payload.project_id + on_behalf_of: + fields: payload.on_behalf_of + severity: + fields: payload.severity + detail: + fields: payload.detail + type: + fields: payload.type + + gnocchi_resources: + archive_policy_default: ceilometer-low + archive_policies: + # NOTE(sileht): We keep "mean" for now to not break all gating that + # use the current tempest scenario. 
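+      # The archive policies below control how Gnocchi stores the measures that
+      # Ceilometer publishes: "aggregation_methods" lists the aggregates that are
+      # computed, "back_window" sets how many past aggregation periods can still
+      # accept late (out-of-order) measures, and each "definition" entry pairs a
+      # granularity (the spacing between aggregated points) with a timespan (how
+      # long those points are retained).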
+ - name: ceilometer-low + aggregation_methods: + - mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + - name: ceilometer-low-rate + aggregation_methods: + - mean + - rate:mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + - name: ceilometer-high + aggregation_methods: + - mean + back_window: 0 + definition: + - granularity: 1 second + timespan: 1 hour + - granularity: 1 minute + timespan: 1 day + - granularity: 1 hour + timespan: 365 days + - name: ceilometer-high-rate + aggregation_methods: + - mean + - rate:mean + back_window: 0 + definition: + - granularity: 1 second + timespan: 1 hour + - granularity: 1 minute + timespan: 1 day + - granularity: 1 hour + timespan: 365 days + + resources: + - resource_type: identity + metrics: + identity.authenticate.success: + identity.authenticate.pending: + identity.authenticate.failure: + identity.user.created: + identity.user.deleted: + identity.user.updated: + identity.group.created: + identity.group.deleted: + identity.group.updated: + identity.role.created: + identity.role.deleted: + identity.role.updated: + identity.project.created: + identity.project.deleted: + identity.project.updated: + identity.trust.created: + identity.trust.deleted: + identity.role_assignment.created: + identity.role_assignment.deleted: + + - resource_type: ceph_account + metrics: + radosgw.objects: + radosgw.objects.size: + radosgw.objects.containers: + radosgw.api.request: + radosgw.containers.objects: + radosgw.containers.objects.size: + + - resource_type: instance + metrics: + memory: + memory.usage: + memory.resident: + memory.swap.in: + memory.swap.out: + memory.bandwidth.total: + memory.bandwidth.local: + vcpus: + cpu: + archive_policy_name: ceilometer-low-rate + cpu_l3_cache: + disk.root.size: + disk.ephemeral.size: + disk.latency: + disk.iops: + disk.capacity: + disk.allocation: + disk.usage: + compute.instance.booting.time: + perf.cpu.cycles: + perf.instructions: + perf.cache.references: + perf.cache.misses: + attributes: + host: resource_metadata.(instance_host|host) + image_ref: resource_metadata.image_ref + launched_at: resource_metadata.launched_at + created_at: resource_metadata.created_at + deleted_at: resource_metadata.deleted_at + display_name: resource_metadata.display_name + flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) + flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) + server_group: resource_metadata.user_metadata.server_group + event_delete: compute.instance.delete.start + event_create: compute.instance.create.end + event_attributes: + id: instance_id + display_name: display_name + host: host + availability_zone: availability_zone + flavor_id: instance_type_id + flavor_name: instance_type + user_id: user_id + project_id: project_id + event_associated_resources: + instance_network_interface: '{"=": {"instance_id": "%s"}}' + instance_disk: '{"=": {"instance_id": "%s"}}' + + - resource_type: instance_network_interface + metrics: + network.outgoing.packets: + archive_policy_name: ceilometer-low-rate + network.incoming.packets: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.drop: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.drop: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.error: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.error: + archive_policy_name: ceilometer-low-rate + network.outgoing.bytes: + archive_policy_name: ceilometer-low-rate 
+ network.incoming.bytes: + archive_policy_name: ceilometer-low-rate + attributes: + name: resource_metadata.vnic_name + instance_id: resource_metadata.instance_id + + - resource_type: instance_disk + metrics: + disk.device.read.requests: + archive_policy_name: ceilometer-low-rate + disk.device.write.requests: + archive_policy_name: ceilometer-low-rate + disk.device.read.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.write.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.latency: + disk.device.read.latency: + disk.device.write.latency: + disk.device.iops: + disk.device.capacity: + disk.device.allocation: + disk.device.usage: + attributes: + name: resource_metadata.disk_name + instance_id: resource_metadata.instance_id + + - resource_type: image + metrics: + image.size: + image.download: + image.serve: + attributes: + name: resource_metadata.name + container_format: resource_metadata.container_format + disk_format: resource_metadata.disk_format + event_delete: image.delete + event_attributes: + id: resource_id + + - resource_type: ipmi + metrics: + hardware.ipmi.node.power: + hardware.ipmi.node.temperature: + hardware.ipmi.node.inlet_temperature: + hardware.ipmi.node.outlet_temperature: + hardware.ipmi.node.fan: + hardware.ipmi.node.current: + hardware.ipmi.node.voltage: + hardware.ipmi.node.airflow: + hardware.ipmi.node.cups: + hardware.ipmi.node.cpu_util: + hardware.ipmi.node.mem_util: + hardware.ipmi.node.io_util: + + - resource_type: ipmi_sensor + metrics: + - 'hardware.ipmi.power' + - 'hardware.ipmi.temperature' + - 'hardware.ipmi.current' + - 'hardware.ipmi.voltage' + attributes: + node: resource_metadata.node + + - resource_type: network + metrics: + bandwidth: + ip.floating: + event_delete: floatingip.delete.end + event_attributes: + id: resource_id + + - resource_type: stack + metrics: + stack.create: + stack.update: + stack.delete: + stack.resume: + stack.suspend: + + - resource_type: swift_account + metrics: + storage.objects.incoming.bytes: + storage.objects.outgoing.bytes: + storage.objects.size: + storage.objects: + storage.objects.containers: + storage.containers.objects: + storage.containers.objects.size: + + - resource_type: volume + metrics: + volume: + volume.size: + snapshot.size: + volume.snapshot.size: + volume.backup.size: + backup.size: + volume.manage_existing.start: + volume.manage_existing.end: + volume.manage_existing_snapshot.start: + volume.manage_existing_snapshot.end: + attributes: + display_name: resource_metadata.(display_name|name) + volume_type: resource_metadata.volume_type + image_id: resource_metadata.image_id + instance_id: resource_metadata.instance_id + event_delete: + - volume.delete.end + - snapshot.delete.end + event_update: + - volume.transfer.accept.end + - snapshot.transfer.accept.end + event_attributes: + id: resource_id + project_id: project_id + + - resource_type: volume_provider + metrics: + volume.provider.capacity.total: + volume.provider.capacity.free: + volume.provider.capacity.allocated: + volume.provider.capacity.provisioned: + volume.provider.capacity.virtual_free: + + - resource_type: volume_provider_pool + metrics: + volume.provider.pool.capacity.total: + volume.provider.pool.capacity.free: + volume.provider.pool.capacity.allocated: + volume.provider.pool.capacity.provisioned: + volume.provider.pool.capacity.virtual_free: + attributes: + provider: resource_metadata.provider + + - resource_type: host + metrics: + hardware.cpu.load.1min: + hardware.cpu.load.5min: + hardware.cpu.load.15min: + 
hardware.cpu.util: + hardware.cpu.user: + archive_policy_name: ceilometer-low-rate + hardware.cpu.nice: + archive_policy_name: ceilometer-low-rate + hardware.cpu.system: + archive_policy_name: ceilometer-low-rate + hardware.cpu.idle: + archive_policy_name: ceilometer-low-rate + hardware.cpu.wait: + archive_policy_name: ceilometer-low-rate + hardware.cpu.kernel: + archive_policy_name: ceilometer-low-rate + hardware.cpu.interrupt: + archive_policy_name: ceilometer-low-rate + hardware.memory.total: + hardware.memory.used: + hardware.memory.swap.total: + hardware.memory.swap.avail: + hardware.memory.buffer: + hardware.memory.cached: + hardware.network.ip.outgoing.datagrams: + hardware.network.ip.incoming.datagrams: + hardware.system_stats.cpu.idle: + hardware.system_stats.io.outgoing.blocks: + hardware.system_stats.io.incoming.blocks: + attributes: + host_name: resource_metadata.resource_url + + - resource_type: host_disk + metrics: + hardware.disk.size.total: + hardware.disk.size.used: + hardware.disk.read.bytes: + hardware.disk.write.bytes: + hardware.disk.read.requests: + hardware.disk.write.requests: + attributes: + host_name: resource_metadata.resource_url + device_name: resource_metadata.device + + - resource_type: host_network_interface + metrics: + hardware.network.incoming.bytes: + hardware.network.outgoing.bytes: + hardware.network.outgoing.errors: + attributes: + host_name: resource_metadata.resource_url + device_name: resource_metadata.name + + - resource_type: nova_compute + metrics: + compute.node.cpu.frequency: + compute.node.cpu.idle.percent: + compute.node.cpu.idle.time: + compute.node.cpu.iowait.percent: + compute.node.cpu.iowait.time: + compute.node.cpu.kernel.percent: + compute.node.cpu.kernel.time: + compute.node.cpu.percent: + compute.node.cpu.user.percent: + compute.node.cpu.user.time: + attributes: + host_name: resource_metadata.host + + - resource_type: manila_share + metrics: + manila.share.size: + attributes: + name: resource_metadata.name + host: resource_metadata.host + status: resource_metadata.status + availability_zone: resource_metadata.availability_zone + protocol: resource_metadata.protocol + + - resource_type: switch + metrics: + switch: + switch.ports: + attributes: + controller: resource_metadata.controller + + - resource_type: switch_port + metrics: + switch.port: + switch.port.uptime: + switch.port.receive.packets: + switch.port.transmit.packets: + switch.port.receive.bytes: + switch.port.transmit.bytes: + switch.port.receive.drops: + switch.port.transmit.drops: + switch.port.receive.errors: + switch.port.transmit.errors: + switch.port.receive.frame_error: + switch.port.receive.overrun_error: + switch.port.receive.crc_error: + switch.port.collision.count: + attributes: + switch: resource_metadata.switch + port_number_on_switch: resource_metadata.port_number_on_switch + neutron_port_id: resource_metadata.neutron_port_id + controller: resource_metadata.controller + + - resource_type: port + metrics: + port: + port.uptime: + port.receive.packets: + port.transmit.packets: + port.receive.bytes: + port.transmit.bytes: + port.receive.drops: + port.receive.errors: + attributes: + controller: resource_metadata.controller + + - resource_type: switch_table + metrics: + switch.table.active.entries: + attributes: + controller: resource_metadata.controller + switch: resource_metadata.switch + + - resource_type: loadbalancer + metrics: + network.services.lb.outgoing.bytes: + network.services.lb.incoming.bytes: + network.services.lb.pool: + network.services.lb.listener: 
+ network.services.lb.member: + network.services.lb.health_monitor: + network.services.lb.loadbalancer: + network.services.lb.total.connections: + network.services.lb.active.connections: + meters: + metric: + # Image + - name: "image.size" + event_type: + - "image.upload" + - "image.delete" + - "image.update" + type: "gauge" + unit: B + volume: $.payload.size + resource_id: $.payload.id + project_id: $.payload.owner + + - name: "image.download" + event_type: "image.send" + type: "delta" + unit: "B" + volume: $.payload.bytes_sent + resource_id: $.payload.image_id + user_id: $.payload.receiver_user_id + project_id: $.payload.receiver_tenant_id + + - name: "image.serve" + event_type: "image.send" + type: "delta" + unit: "B" + volume: $.payload.bytes_sent + resource_id: $.payload.image_id + project_id: $.payload.owner_id + + - name: 'volume.provider.capacity.total' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.total + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.free' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.free + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.allocated' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.allocated + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.provisioned' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.provisioned + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.virtual_free' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.virtual_free + resource_id: $.payload.name_to_id + + - name: 'volume.provider.pool.capacity.total' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.total + resource_id: $.payload.name_to_id + metadata: &provider_pool_meta + provider: $.payload.name_to_id.`split(#, 0, 1)` + + - name: 'volume.provider.pool.capacity.free' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.free + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.provider.pool.capacity.allocated' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.allocated + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.provider.pool.capacity.provisioned' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.provisioned + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.provider.pool.capacity.virtual_free' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.virtual_free + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.size' + event_type: + - 'volume.exists' + - 'volume.retype' + - 'volume.create.*' + - 'volume.delete.*' + - 'volume.resize.*' + - 'volume.attach.*' + - 'volume.detach.*' + - 'volume.update.*' + - 'volume.manage.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.volume_id + metadata: + display_name: $.payload.display_name + volume_type: $.payload.volume_type + image_id: $.payload.glance_metadata[?key=image_id].value + instance_id: $.payload.volume_attachment[0].server_id + + - name: 'snapshot.size' + event_type: + - 'snapshot.exists' + - 'snapshot.create.*' + - 'snapshot.delete.*' + - 
'snapshot.manage.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.volume_size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.snapshot_id + metadata: + display_name: $.payload.display_name + + - name: 'backup.size' + event_type: + - 'backup.exists' + - 'backup.create.*' + - 'backup.delete.*' + - 'backup.restore.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.backup_id + metadata: + display_name: $.payload.display_name + + # Magnum + - name: $.payload.metrics.[*].name + event_type: 'magnum.bay.metrics.*' + type: 'gauge' + unit: $.payload.metrics.[*].unit + volume: $.payload.metrics.[*].value + user_id: $.payload.user_id + project_id: $.payload.project_id + resource_id: $.payload.resource_id + lookup: ['name', 'unit', 'volume'] + + # Swift + - name: $.payload.measurements.[*].metric.[*].name + event_type: 'objectstore.http.request' + type: 'delta' + unit: $.payload.measurements.[*].metric.[*].unit + volume: $.payload.measurements.[*].result + resource_id: $.payload.target.id + user_id: $.payload.initiator.id + project_id: $.payload.initiator.project_id + lookup: ['name', 'unit', 'volume'] + + - name: 'memory' + event_type: &instance_events compute.instance.(?!create.start|update).* + type: 'gauge' + unit: 'MB' + volume: $.payload.memory_mb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: &instance_meta + host: $.payload.host + flavor_id: $.payload.instance_flavor_id + flavor_name: $.payload.instance_type + display_name: $.payload.display_name + image_ref: $.payload.image_meta.base_image_ref + launched_at: $.payload.launched_at + created_at: $.payload.created_at + deleted_at: $.payload.deleted_at + + - name: 'vcpus' + event_type: *instance_events + type: 'gauge' + unit: 'vcpu' + volume: $.payload.vcpus + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'compute.instance.booting.time' + event_type: 'compute.instance.create.end' + type: 'gauge' + unit: 'sec' + volume: + fields: [$.payload.created_at, $.payload.launched_at] + plugin: 'timedelta' + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'disk.root.size' + event_type: *instance_events + type: 'gauge' + unit: 'GB' + volume: $.payload.root_gb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'disk.ephemeral.size' + event_type: *instance_events + type: 'gauge' + unit: 'GB' + volume: $.payload.ephemeral_gb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'bandwidth' + event_type: 'l3.meter' + type: 'delta' + unit: 'B' + volume: $.payload.bytes + project_id: $.payload.tenant_id + resource_id: $.payload.label_id + + - name: 'compute.node.cpu.frequency' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'MHz' + volume: $.payload.metrics[?(@.name='cpu.frequency')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp + metadata: + 
event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.frequency')].source + + - name: 'compute.node.cpu.user.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.user.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.user.time')].source + + - name: 'compute.node.cpu.kernel.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.kernel.time')].source + + - name: 'compute.node.cpu.idle.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.idle.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.idle.time')].source + + - name: 'compute.node.cpu.iowait.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.iowait.time')].source + + - name: 'compute.node.cpu.kernel.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source + + - name: 'compute.node.cpu.idle.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.idle.percent')].source + + - name: 'compute.node.cpu.user.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.user.percent')].source + + - name: 'compute.node.cpu.iowait.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: 
$.payload.metrics[?(@.name='cpu.iowait.percent')].source + + - name: 'compute.node.cpu.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.percent')].source + + # Identity + # NOTE(gordc): hack because jsonpath-rw-ext can't concat starting with string. + - name: $.payload.outcome - $.payload.outcome + 'identity.authenticate.' + $.payload.outcome + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.authenticate' + resource_id: $.payload.initiator.id + user_id: $.payload.initiator.id + + # DNS + - name: 'dns.domain.exists' + event_type: 'dns.domain.exists' + type: 'cumulative' + unit: 's' + volume: + fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] + plugin: 'timedelta' + project_id: $.payload.tenant_id + resource_id: $.payload.id + user_id: $.ctxt.user + metadata: + status: $.payload.status + pool_id: $.payload.pool_id + host: $.publisher_id + + # Trove + - name: 'trove.instance.exists' + event_type: 'trove.instance.exists' + type: 'cumulative' + unit: 's' + volume: + fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] + plugin: 'timedelta' + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_id: $.payload.user_id + metadata: + nova_instance_id: $.payload.nova_instance_id + state: $.payload.state + service_id: $.payload.service_id + instance_type: $.payload.instance_type + instance_type_id: $.payload.instance_type_id + + # Manila + - name: 'manila.share.size' + event_type: + - 'share.create.*' + - 'share.delete.*' + - 'share.extend.*' + - 'share.shrink.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.project_id + resource_id: $.payload.share_id + metadata: + name: $.payload.name + host: $.payload.host + status: $.payload.status + availability_zone: $.payload.availability_zone + protocol: $.payload.proto + + polling: + sources: + - name: all_pollsters + interval: 300 + meters: + - "*" + pipeline: + sources: + - name: meter_source + meters: + - "*" + sinks: + - meter_sink + sinks: + - name: meter_sink + publishers: + - gnocchi + policy: {} + audit_api_map: + DEFAULT: + target_endpoint_type: None + path_keywords: + meters: meter_name + resources: resource_id + statistics: None + samples: sample_id + service_endpoints: + metering: service/metering + rally_tests: + CeilometerStats.create_meter_and_get_stats: + - args: + user_id: user-id + resource_id: resource-id + counter_volume: 1 + counter_unit: '' + counter_type: cumulative + runner: + type: constant + times: 1 + concurrency: 1 + sla: + failure_rate: + max: 0 + CeilometerMeters.list_meters: + - runner: + type: constant + times: 1 + concurrency: 1 + sla: + failure_rate: + max: 0 + context: + ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 1 + resources_per_tenant: 1 + samples_per_resource: 1 + timestamp_interval: 10 + metadata_list: + - status: active + name: rally benchmark on + deleted: 'false' + - status: terminated + name: rally benchmark off + deleted: 'true' + args: + limit: 5 + metadata_query: + status: terminated + CeilometerQueries.create_and_query_samples: + - args: + filter: + "=": + counter_unit: instance + 
orderby: + limit: 10 + counter_name: cpu_util + counter_type: gauge + counter_unit: instance + counter_volume: 1 + resource_id: resource_id + runner: + type: constant + times: 1 + concurrency: 1 + sla: + failure_rate: + max: 0 + +dependencies: + dynamic: + common: + local_image_registry: + jobs: + - ceilometer-image-repo-sync + services: + - endpoint: node + service: local_image_registry + static: + central: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + ipmi: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + compute: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + db_sync: + jobs: [] + services: [] + ks_service: + services: + - endpoint: internal + service: identity + ks_user: + services: + - endpoint: internal + service: identity + rabbit_init: + services: + - service: oslo_messaging + endpoint: internal + notification: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + tests: + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metering + - endpoint: internal + service: metric + image_repo_sync: + services: + - endpoint: internal + service: local_image_registry + +# Names of secrets used by bootstrap and environmental checks +secrets: + identity: + admin: ceilometer-keystone-admin + ceilometer: ceilometer-keystone-user + test: ceilometer-keystone-test + oslo_messaging: + admin: ceilometer-rabbitmq-admin + ceilometer: ceilometer-rabbitmq-user + oci_image_registry: + ceilometer: ceilometer-oci-image-registry + +bootstrap: + enabled: false + ks_user: ceilometer + script: | + openstack token issue + +# typically overridden by environmental +# values, but should include all endpoints +# required by this chart +endpoints: + cluster_domain_suffix: cluster.local + local_image_registry: + name: docker-registry + namespace: docker-registry + hosts: + default: localhost + internal: docker-registry + node: localhost + host_fqdn_override: + default: null + port: + registry: + node: 5000 + oci_image_registry: + name: oci-image-registry + namespace: oci-image-registry + auth: + enabled: false + ceilometer: + username: ceilometer + password: password + hosts: + default: localhost + host_fqdn_override: + default: null + port: + registry: + default: null + identity: + name: keystone + auth: + admin: + region_name: RegionOne + username: admin + password: password + project_name: admin + user_domain_name: default + project_domain_name: default + ceilometer: + role: admin + region_name: RegionOne + username: ceilometer + password: password + project_name: service + user_domain_name: service + project_domain_name: service + test: + role: admin + region_name: RegionOne + username: ceilometer-test + password: password + project_name: test + user_domain_name: service + project_domain_name: service + hosts: + default: keystone + internal: keystone-api + host_fqdn_override: + default: null + path: + default: /v3 + scheme: + default: 'http' + port: + api: + default: 5000 + 
public: 80 + internal: 5000 + service: 5000 + metric: + name: gnocchi + hosts: + default: gnocchi-api + public: gnocchi + host_fqdn_override: + default: null + path: + default: null + scheme: + default: 'http' + port: + api: + default: 8041 + public: 80 + internal: 8041 + service: 8041 + alarming: + name: aodh + hosts: + default: aodh-api + public: aodh + host_fqdn_override: + default: null + path: + default: null + scheme: + default: 'http' + port: + api: + default: 8042 + public: 80 + oslo_cache: + auth: + # NOTE(portdirect): this is used to define the value for keystone + # authtoken cache encryption key, if not set it will be populated + # automatically with a random value, but to take advantage of + # this feature all services should be set to use the same key, + # and memcache service. + memcache_secret_key: null + hosts: + default: memcached + host_fqdn_override: + default: null + port: + memcache: + default: 11211 + oslo_messaging: + auth: + admin: + username: rabbitmq + password: password + ceilometer: + username: ceilometer + password: password + statefulset: + replicas: 2 + name: rabbitmq-rabbitmq + hosts: + default: rabbitmq + host_fqdn_override: + default: null + path: /ceilometer + scheme: rabbit + port: + amqp: + default: 5672 + http: + default: 15672 + +pod: + affinity: + anti: + type: + default: preferredDuringSchedulingIgnoredDuringExecution + topologyKey: + default: kubernetes.io/hostname + weight: + default: 10 + tolerations: + ceilometer: + enabled: false + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + mounts: + ceilometer_tests: + init_container: null + ceilometer_tests: + volumeMounts: + volumes: + ceilometer_compute: + init_container: null + ceilometer_compute: + volumeMounts: + volumes: + ceilometer_central: + init_container: null + ceilometer_central: + volumeMounts: + volumes: + ceilometer_ipmi: + init_container: null + ceilometer_ipmi: + volumeMounts: + volumes: + ceilometer_notification: + init_container: null + ceilometer_notification: + volumeMounts: + volumes: + ceilometer_db_sync: + ceilometer_db_sync: + volumeMounts: + volumes: + replicas: + central: 1 + notification: 1 + lifecycle: + upgrades: + deployments: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + daemonsets: + pod_replacement_strategy: RollingUpdate + compute: + enabled: true + min_ready_seconds: 0 + max_unavailable: 1 + resources: + enabled: false + compute: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + notification: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + central: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ipmi: + requests: + memory: "124Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + jobs: + db_sync: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + rabbit_init: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_service: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_user: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + tests: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + image_repo_sync: + requests: + memory: "128Mi" + cpu: 
"100m" + limits: + memory: "1024Mi" + cpu: "2000m" + +network_policy: + ceilometer: + ingress: + - {} + egress: + - {} + +manifests: + configmap_bin: true + configmap_etc: true + deployment_api: false + deployment_central: true + deployment_collector: false + daemonset_compute: true + daemonset_ipmi: false + deployment_notification: true + ingress_api: false + job_bootstrap: true + job_db_drop: false + # using gnocchi so no db init + job_db_init: false + job_db_init_mongodb: false + # runs ceilometer-upgrade which inits resource types in gnocchi! + job_db_sync: true + job_image_repo_sync: true + job_ks_endpoints: false + job_ks_service: true + job_ks_user: true + job_rabbit_init: true + pdb_api: true + pod_rally_test: true + network_policy: false + secret_db: true + secret_keystone: true + secret_mongodb: false + secret_rabbitmq: true + secret_registry: true + service_api: true + service_ingress_api: true +... diff --git a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml index 7ade5b93..db1c37bb 100644 --- a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml +++ b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml @@ -234,7 +234,7 @@ pod: init_container: null gnocchi_tests: replicas: - api: 1 + api: 3 lifecycle: upgrades: deployments: @@ -246,11 +246,11 @@ pod: daemonsets: pod_replacement_strategy: RollingUpdate metricd: - enabled: false + enabled: true min_ready_seconds: 0 max_unavailable: 1 statsd: - enabled: false + enabled: true min_ready_seconds: 0 max_unavailable: 1 disruption_budget: diff --git a/helm-configs/postgresql/postgresql-helm-overrides.yaml b/helm-configs/postgresql/postgresql-helm-overrides.yaml index 679228c1..ad41ea06 100644 --- a/helm-configs/postgresql/postgresql-helm-overrides.yaml +++ b/helm-configs/postgresql/postgresql-helm-overrides.yaml @@ -239,7 +239,7 @@ jobs: # activeDeadlineSeconds == 0 means no deadline activeDeadlineSeconds: 0 backoffLimit: 6 - cron: "0 0 * * *" + cron: "15 0 * * *" history: success: 3 failed: 1 @@ -300,12 +300,12 @@ conf: hba_file: '/tmp/pg_hba.conf' ident_file: '/tmp/pg_ident.conf' backup: - enabled: false + enabled: true base_path: /var/backup days_to_keep: 3 pg_dumpall_options: '--inserts --clean' remote_backup: - enabled: false + enabled: true container_name: postgresql days_to_keep: 14 storage_policy: default-placement @@ -466,7 +466,7 @@ manifests: configmap_etc: true job_image_repo_sync: true network_policy: false - job_ks_user: false + job_ks_user: true secret_admin: true secret_etc: true secret_audit: true @@ -474,8 +474,8 @@ manifests: secret_registry: true service: true statefulset: true - cron_job_postgresql_backup: false - pvc_backup: false + cron_job_postgresql_backup: true + pvc_backup: true monitoring: prometheus: configmap_bin: false diff --git a/mkdocs.yml b/mkdocs.yml index d553449c..ce4435f5 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -182,6 +182,7 @@ nav: - skyline: openstack-skyline.md - Octavia: openstack-octavia.md - Gnocchi: openstack-gnocchi.md + - Ceilometer: openstack-ceilometer.md - Monitoring: - Monitoring Overview: prometheus-monitoring-overview.md - Prometheus: prometheus.md From e2063905de1d27ad654e165df9193339f7d4e2fc Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Tue, 19 Mar 2024 15:06:17 -0500 Subject: [PATCH 10/20] feat: new doc updates (#161) Signed-off-by: Kevin Carter --- docs/{examples => }/alertmanager-encore.md | 3 ++- docs/{examples => }/alertmanager-slack.md | 1 + docs/genestack-upgrade.md | 25 ++++++++++++++++++++++ docs/k8s-config.md | 12 
+++++++++--
 mkdocs.yml                                 |  5 ++++-
 5 files changed, 42 insertions(+), 4 deletions(-)
 rename docs/{examples => }/alertmanager-encore.md (98%)
 rename docs/{examples => }/alertmanager-slack.md (98%)

diff --git a/docs/examples/alertmanager-encore.md b/docs/alertmanager-encore.md
similarity index 98%
rename from docs/examples/alertmanager-encore.md
rename to docs/alertmanager-encore.md
index 282b12ac..572963f0 100644
--- a/docs/examples/alertmanager-encore.md
+++ b/docs/alertmanager-encore.md
@@ -1,9 +1,10 @@
+# Encore Alerts

 The following example describes configuration options to send alerts via alertmanager to Rackspace encore, the `Encore UUID` is derived by account
 where the secret `SECRET KEY` is used per application submitting webhooks:

-```yaml
+``` yaml
 global:
   resolve_timeout: 5m
 receivers:
diff --git a/docs/examples/alertmanager-slack.md b/docs/alertmanager-slack.md
similarity index 98%
rename from docs/examples/alertmanager-slack.md
rename to docs/alertmanager-slack.md
index 7a7ef539..6b6a77cb 100644
--- a/docs/examples/alertmanager-slack.md
+++ b/docs/alertmanager-slack.md
@@ -1,3 +1,4 @@
+# Slack Alerts

 The following example describes configuration options to send alerts via alertmanager to slack using a slack hook.

diff --git a/docs/genestack-upgrade.md b/docs/genestack-upgrade.md
index 8832c897..f1c62446 100644
--- a/docs/genestack-upgrade.md
+++ b/docs/genestack-upgrade.md
@@ -31,3 +31,28 @@ An update is generally the same as an install. Many of the Genestack application
 * When needing to run an upgrade for the infrastructure operators, consult the operator documentation to validate the steps required.
 * When needing to run an upgrade for the OpenStack components, simply re-run the `helm` charts as documented in the Genestack installation process.
+
+## Kubernetes Upgrade Notes
+
+Over the course of normal operations it's likely that a CRD will change versions, names, or something else. In these cases, should an operator or helm chart not gracefully handle a full upgrade, the `kubectl convert` plugin can be used to make some adjustments where needed.
+
+!!! example "Converting mmontes CRDs to mariadb official ones"
+
+    ``` shell
+    kubectl get --namespace openstack crd.namespace -o yaml value > /tmp/value.crd.namespace.yaml
+    kubectl convert -f /tmp/value.crd.namespace.yaml --output-version new-namespace/VERSION
+    ```
+
+## Kubernetes Finalizers
+
+When processing an upgrade, there may come a time when a finalizer is stuck, typically something that happens when an operator or an API reference is changed. If this happens, one way to resolve the issue is to patch the finalizers.
+
+!!! warning
+
+    Patching finalizers could leave orphaned resources. Before patching a finalizer, be sure you're ready.
+
+!!! example "Patching Finalizers"
+
+    ``` shell
+    kubectl patch $@ --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+    ```
diff --git a/docs/k8s-config.md b/docs/k8s-config.md
index 0f3cb870..f65c6617 100644
--- a/docs/k8s-config.md
+++ b/docs/k8s-config.md
@@ -8,8 +8,16 @@ Install the `kubectl` tool.

 ``` shell
 curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
-sudo mv kubectl /usr/local/bin/
-sudo chmod +x /usr/local/bin/kubectl
+sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+```
+
+### Install the `convert` plugin
+
+The convert plugin can be used to assist with upgrades.
+ +``` shell +curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert" +sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert ``` ## Retrieve the kube config diff --git a/mkdocs.yml b/mkdocs.yml index ce4435f5..69760446 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -143,6 +143,7 @@ nav: - Kubernetes Dashboard: k8s-dashboard.md - Kubernetes Taint: k8s-taint.md - Retrieve kube config: k8s-config.md + - Prometheus: prometheus.md - Storage: - storage-overview.md - Ceph Internal: storage-ceph-rook-internal.md @@ -185,13 +186,15 @@ nav: - Ceilometer: openstack-ceilometer.md - Monitoring: - Monitoring Overview: prometheus-monitoring-overview.md - - Prometheus: prometheus.md - Grafana: grafana.md - MySQL Exporter: prometheus-mysql-exporter.md - RabbitMQ Exporter: prometheus-rabbitmq-exporter.md - Memcached Exporter: prometheus-memcached-exporter.md - Postgres Exporter: prometheus-postgres-exporter.md - Openstack Exporter: prometheus-openstack-metrics-exporter.md + - Alert Manager Examples: + - alertmanager-encore.md + - alertmanager-slack.md - Operational Guide: - Running Genestack Upgrade: genestack-upgrade.md - Running Kubespray Upgrade: k8s-kubespray-upgrade.md From c15704dcc699b73fd3f12104f5a1a733fb89b34a Mon Sep 17 00:00:00 2001 From: dereknoblej <48695318+dereknoblej@users.noreply.github.com> Date: Tue, 19 Mar 2024 15:07:08 -0500 Subject: [PATCH 11/20] DOC: Openstack Server Docs (#156) * DOC: Openstack Server Docs * Fixing spacing issues * trailing white space * updated links to latests docs * Resolved requested changes --- docs/openstack-floating-ips.md | 2 +- docs/openstack-servers.md | 152 +++++++++++++++++++++++++++++++++ mkdocs.yml | 1 + 3 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 docs/openstack-servers.md diff --git a/docs/openstack-floating-ips.md b/docs/openstack-floating-ips.md index d8cc37e9..015567a6 100644 --- a/docs/openstack-floating-ips.md +++ b/docs/openstack-floating-ips.md @@ -1,6 +1,6 @@ # Openstack Floating Ips -To read more about Openstack Floating Ips using the [upstream docs](https://docs.openstack.org/python-openstackclient/pike/cli/command-objects/floating-ip.html). +To read more about Openstack Floating Ips using the [upstream docs](https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/floating-ip.html). #### List and view floating ips diff --git a/docs/openstack-servers.md b/docs/openstack-servers.md new file mode 100644 index 00000000..83f81501 --- /dev/null +++ b/docs/openstack-servers.md @@ -0,0 +1,152 @@ +# Openstack Servers + +To read more about Openstack Servers using the [upstream docs](https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html). 
+ +#### List and view servers + +``` shell +openstack server list + [--quote {all,minimal,none,nonnumeric}] + [--reservation-id ] + [--ip ] + [--ip6 ] + [--name ] + [--instance-name ] + [--status ] + [--flavor ] + [--image ] + [--host ] + [--all-projects] + [--project ] + [--project-domain ] + [--user ] + [--user-domain ] + [--long] + [-n] + [--marker ] + [--limit ] + [--deleted] + [--changes-since ] +``` + +#### Create a new server + +``` shell +openstack server create + (--image | --volume ) + --flavor + [--security-group ] + [--key-name ] + [--property ] + [--file ] + [--user-data ] + [--availability-zone ] + [--block-device-mapping ] + [--nic ] + [--network ] + [--port ] + [--hint ] + [--config-drive |True] + [--min ] + [--max ] + [--wait] + +``` + +#### Delete a server + +``` shell +openstack server delete [--wait] [ ...] +``` + +# Launch a server from a snapshot + +#### Create a snapshot of the instance + +!!! note + + If necessary, list the instances to view the instance name with the list server command above. + +1. Shut down the source VM before you take the snapshot to ensure that all data is flushed to disk. Use the openstack server stop command to shut down the instance: + + ``` shell + openstack server stop myInstance + ``` + +2. Use the openstack server list command to confirm that the instance shows a SHUTOFF status. + +3. Use the openstack server image create command to take a snapshot: + + ``` shell + openstack server image create myInstance --name myInstanceSnapshot + ``` + The above command creates the image myInstance by taking a snapshot of a running server. + +4. Use the openstack image list command to check the status until the status is active: + + ``` shell + openstack image list + ``` + +#### Download the snapshot + +!!! note + + Get the image id from the image list command (seen above). + +Download the snapshot by using the image ID: + +``` shell +openstack image save --file snapshot.raw {Image ID} +``` + +Make the image available to the new environment, either through HTTP or direct upload to a machine (scp). + +#### Import the snapshot to the new env + +In the new project or cloud environment, import the snapshot: + +``` shell +openstack image create NEW_IMAGE_NAME \ + --container-format bare --disk-format qcow2 --file IMAGE_URL +``` + +#### Boot a new sever from the snapshot + +In the new project or cloud environment, use the snapshot to create the new instance: + +``` shell +openstack server create --flavor m1.tiny --image myInstanceSnapshot myNewInstance +``` + + +# Launch a server from a volume + +#### Boot instance from volume + +You can create a bootable volume from an existing image, volume, or snapshot. This procedure shows you how to create a volume from an image and use the volume to boot an instance. + +1. List available images, noting the ID of the image that you wish to use. + ``` shell + openstack image list + ``` +2. Create a bootable volume from the chosen image. + ``` shell + openstack volume create \ + --image {Image ID} --size 10 \ + test-volume + ``` +3. Create a server, specifying the volume as the boot device. + ``` shell + $ openstack server create \ + --flavor $FLAVOR --network $NETWORK \ + --volume {Volume ID}\ + --wait test-server + ``` +4. List volumes once again to ensure the status has changed to in-use and the volume is correctly reporting the attachment. 
+    ``` shell
+    openstack volume list
+    ```
+    ``` shell
+    openstack server volume list test-server
+    ```
diff --git a/mkdocs.yml b/mkdocs.yml
index 69760446..8fc36cde 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -214,3 +214,4 @@ nav:
       - Cloud Onboarding:
           - Openstack Security Groups: openstack-security-groups.md
           - Openstack Floating Ips: openstack-floating-ips.md
+          - Openstack Servers: openstack-servers.md

From 2bf2f5a52094da1e8fbfb862da729b386b888c16 Mon Sep 17 00:00:00 2001
From: ajay2012 <97219541+ajay2012@users.noreply.github.com>
Date: Wed, 20 Mar 2024 01:54:00 +0530
Subject: [PATCH 12/20] Nova cpu allocation ratio.md (#160)

* Create openstack-nova-cpu-allocation-ratio-guide.md
* Update mkdocs.yml
* Rename openstack-nova-cpu-allocation-ratio-guide.md to openstack-cpu-allocation-ratio.md
* Update nova-helm-overrides.yaml
* Update openstack-cpu-allocation-ratio.md
---
 docs/openstack-cpu-allocation-ratio.md     | 53 ++++++++++++++++++++++
 helm-configs/nova/nova-helm-overrides.yaml |  2 +-
 mkdocs.yml                                 |  1 +
 3 files changed, 55 insertions(+), 1 deletion(-)
 create mode 100644 docs/openstack-cpu-allocation-ratio.md

diff --git a/docs/openstack-cpu-allocation-ratio.md b/docs/openstack-cpu-allocation-ratio.md
new file mode 100644
index 00000000..f6fe43b0
--- /dev/null
+++ b/docs/openstack-cpu-allocation-ratio.md
@@ -0,0 +1,53 @@
+# Nova CPU Allocation Guide
+
+By default, openstack-helm provides a CPU allocation ratio of 3:1. For a production deployment, the CPU allocation ratio needs to be decided based on multiple factors, such as:
+
+1. Workload type: Different workloads have different CPU utilization patterns. For example, web servers might have bursty utilization, while databases might be more consistent.
+2. Performance Requirements: Consider the performance requirements of the workloads. Some applications may require dedicated CPU resources to meet their performance SLA, whereas others can share resources.
+3. Overhead: Account for the overhead introduced by the operating system, hypervisor, and virtualization layer. Sometimes compute nodes are also used as hyperconverged nodes, which can impact the effective allocation ratio.
+4. Peak vs Average Usage: Determine whether to set allocation ratios based on peak or average CPU usage. Sizing for peak usage ensures there are enough resources available during periods of high demand, but it may result in underutilization during off-peak hours.
+5. Growth and Scalability: Consider future growth and scalability needs when setting CPU allocation ratios. Setting the ratio too low may leave resources underutilized, while setting it too high may lead to resource contention as the deployment scales.
+
+Let's consider the two use cases below to calculate the CPU allocation ratio for a deployment with HPE DL380 servers.
+
+### Case 1: CPU allocation ratio for shared CPU
+
+Workload type: Consider a flavor with 8 vCPUs for a workload, which will meet its peak demand and required performance.
+
+Max VMs per host: Assume a maximum of 60 VMs of this flavor can be hosted on a single hypervisor, as per our scaling/growth forecast.
+
+CPUs on hypervisor: An HPE DL380 has 72 physical CPUs (PCPU).
+
+
+Example :
+``` shell
+    Total physical CPU (PCPU) = 72
+    No. of vCPU per flavor (VCPU) = 8
+    No. of Instance per hypervisor (VM) = 60
+    Overhead on CPU (OCPU) = 8
+    Formula to calculate CPU allocation ratio:
+
+    CAR = VM * VCPU / (PCPU - OPCU)
+    CAR = 60 * 8 / (72 - 8)
+        = 480/64
+        = ~8
+```
+So here we get an approximate CPU allocation ratio of 8:1.
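+
+As a quick sanity check, the same arithmetic can be scripted. This is only an
+illustrative sketch using the assumed Case 1 values (72 physical CPUs, 8 CPUs of
+overhead, 60 instances with 8 vCPUs each); substitute your own flavor and
+hypervisor numbers. The result is the value you would feed into Nova's
+`cpu_allocation_ratio` override.
+
+``` shell
+# Assumed inputs from the Case 1 example above
+PCPU=72   # physical CPUs on the hypervisor
+OCPU=8    # CPUs reserved for OS/hypervisor overhead
+VM=60     # maximum instances per hypervisor
+VCPU=8    # vCPUs in the chosen flavor
+
+# CAR = (VM * VCPU) / (PCPU - OCPU)
+awk -v vm="$VM" -v vcpu="$VCPU" -v pcpu="$PCPU" -v ocpu="$OCPU" \
+    'BEGIN { printf "cpu_allocation_ratio: %.1f\n", (vm * vcpu) / (pcpu - ocpu) }'
+# cpu_allocation_ratio: 7.5  -> rounded up to 8 for an 8:1 ratio
+```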
+
+### Case 2: Shared workload with CPU pinning
+
+There may be a requirement to run CPU-pinned VMs alongside floating instances (shared CPUs). In that case, the CPU allocation ratio for the compute node will be different from the rest of the nodes. Let's see how to calculate the CPU allocation ratio for this type of compute node:
+
+Example :
+``` shell
+    No. of CPU dedicated for CPU pinning (RCPUP) : 16
+    CPU allocation ratio:
+
+    CAR = VM * VCPU / (PCPU - RCPUP - OCPU)
+    CAR = 60 * 8 / (72 - 16 - 8)
+        = 480/48
+        = 10
+```
+So, here the CPU allocation ratio will be 10:1 on hosts running both CPU-pinned instances and floating instances.
+
+Please note, the above is an example only. For your use case, consider the flavor's CPU specifications based on application benchmark requirements, peak utilization, and future scaling needs.
diff --git a/helm-configs/nova/nova-helm-overrides.yaml b/helm-configs/nova/nova-helm-overrides.yaml
index d6923c6c..450a8a9d 100644
--- a/helm-configs/nova/nova-helm-overrides.yaml
+++ b/helm-configs/nova/nova-helm-overrides.yaml
@@ -1375,7 +1375,7 @@ conf:
       default_ephemeral_format: ext4
       ram_allocation_ratio: 1.0
       disk_allocation_ratio: 1.0
-      cpu_allocation_ratio: 3.0
+      cpu_allocation_ratio: 8.0
       state_path: /var/lib/nova
       osapi_compute_listen: 0.0.0.0
       # NOTE(portdirect): the bind port should not be defined, and is manipulated
diff --git a/mkdocs.yml b/mkdocs.yml
index 8fc36cde..c0cda4dc 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -205,6 +205,7 @@ nav:
           - Generating Clouds YAML: openstack-clouds.md
           - Keystone Federation to Rackspace: openstack-keystone-federation.md
           - Nova Flavor Creation: openstack-flavors.md
+          - Nova CPU Allocation Ratio: openstack-cpu-allocation-ratio.md
           - Creating Networks: openstack-neutron-networks.md
           - Glance Images Creation: openstack-glance-images.md
           - Building Local Images: build-local-images.md

From 732b7df80a72e5a91225354f2d819a13d94ca40e Mon Sep 17 00:00:00 2001
From: Kevin Carter
Date: Tue, 19 Mar 2024 15:30:09 -0500
Subject: [PATCH 13/20] feat: changing the docs for consistent experiences
 (#162)

Signed-off-by: Kevin Carter
---
 docs/alertmanager-slack.md             |   2 +-
 docs/grafana.md                        |   6 +-
 docs/infrastructure-metallb.md         |   2 +-
 docs/infrastructure-ovn-db-backup.md   |  10 +-
 docs/infrastructure-postgresql.md      |   4 +-
 docs/openstack-ceilometer.md           |  10 +-
 docs/openstack-cpu-allocation-ratio.md |   4 +-
 docs/openstack-floating-ips.md         |  18 +-
 docs/openstack-gnocchi.md              |  12 +-
 docs/openstack-security-groups.md      |  29 +--
 docs/openstack-servers.md              |  12 +-
 docs/vault-secrets-operator.md         | 258 ++++++++++++++-----------
 docs/vault.md                          |   2 +-
 13 files changed, 205 insertions(+), 164 deletions(-)

diff --git a/docs/alertmanager-slack.md b/docs/alertmanager-slack.md
index 6b6a77cb..eafc3147 100644
--- a/docs/alertmanager-slack.md
+++ b/docs/alertmanager-slack.md
@@ -3,7 +3,7 @@

 The following example describes configuration options to send alerts via alertmanager to slack using a slack hook.
-```yaml +``` yaml alertmanager: alertmanagerSpec: image: diff --git a/docs/grafana.md b/docs/grafana.md index 07310958..55db8c6d 100644 --- a/docs/grafana.md +++ b/docs/grafana.md @@ -17,7 +17,7 @@ In order to avoid putting sensative information on the cli, it is recommended to example secret file: -```yaml +``` yaml apiversion: v1 data: client_id: base64_encoded_client_id @@ -37,7 +37,7 @@ If you have specific datasources that should be populated when grafana deploys, example datasources yaml file: -```yaml +``` yaml datasources: datasources.yaml: apiversion: 1 @@ -126,7 +126,7 @@ Your cert and key files should look something like the following (cert and key e ## Add repo and install -```shell +``` shell helm repo add grafana https://grafana.github.io/helm-charts helm repo update kubectl create ns grafana diff --git a/docs/infrastructure-metallb.md b/docs/infrastructure-metallb.md index 944bd764..be8cee67 100644 --- a/docs/infrastructure-metallb.md +++ b/docs/infrastructure-metallb.md @@ -6,7 +6,7 @@ need to be customized to meet the needs of your environment. ## Example LB manifest -```yaml +``` yaml metadata: name: openstack-external namespace: metallb-system diff --git a/docs/infrastructure-ovn-db-backup.md b/docs/infrastructure-ovn-db-backup.md index 248d4391..86866755 100644 --- a/docs/infrastructure-ovn-db-backup.md +++ b/docs/infrastructure-ovn-db-backup.md @@ -34,16 +34,18 @@ The directions in the _Kube-OVN_ documentation use `docker run` to get a working The _Kube-OVN_ documentation directs you to pick the node running the `ovn-central` pod associated with the first IP of the `NODE_IPS` environment variable. You should find the `NODE_IPS` environment variable defined on an `ovn-central` pod or the `ovn-central` _Deployment_. Assuming you can run the `kubectl` commands, the following example gets the node IPs off of one of the the deployment: -``` -$ kubectl get deployment -n kube-system ovn-central -o yaml | grep -A1 'name: NODE_IPS' +``` shell +kubectl get deployment -n kube-system ovn-central -o yaml | grep -A1 'name: NODE_IPS' + - name: NODE_IPS value: 10.130.140.246,10.130.140.250,10.130.140.252 ``` Then find the _k8s_ node with the first IP. You can see your _k8s_ nodes and their IPs with the command `kubectl get node -o wide`: -``` -$ kubectl get node -o wide | grep 10.130.140.246 +``` shell +kubectl get node -o wide | grep 10.130.140.246 + k8s-controller01 Ready control-plane 3d17h v1.28.6 10.130.140.246 Ubuntu 22.04.3 LTS 6.5.0-17-generic containerd://1.7.11 root@k8s-controller01:~# ``` diff --git a/docs/infrastructure-postgresql.md b/docs/infrastructure-postgresql.md index 1ec86d22..23536cf9 100644 --- a/docs/infrastructure-postgresql.md +++ b/docs/infrastructure-postgresql.md @@ -2,7 +2,7 @@ ## Create Secrets -```shell +``` shell kubectl --namespace openstack create secret generic postgresql-identity-admin \ --type Opaque \ --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" @@ -23,7 +23,7 @@ kubectl --namespace openstack create secret generic postgresql-db-audit \ Consider the PVC size you will need for the environment you're deploying in. Make adjustments as needed near `storage.[pvc|archive_pvc].size` and `volume.backup.size` to your helm overrides. 
-```shell +``` shell cd /opt/genestack/submodules/openstack-helm-infra helm upgrade --install postgresql ./postgresql \ --namespace=openstack \ diff --git a/docs/openstack-ceilometer.md b/docs/openstack-ceilometer.md index d43f1879..9d67fb4f 100644 --- a/docs/openstack-ceilometer.md +++ b/docs/openstack-ceilometer.md @@ -2,7 +2,7 @@ ## Create Secrets -```shell +``` shell kubectl --namespace openstack create secret generic ceilometer-keystone-admin-password \ --type Opaque \ --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" @@ -16,7 +16,7 @@ kubectl --namespace openstack create secret generic ceilometer-rabbitmq-password ## Run the package deployment -```shell +``` shell cd /opt/genestack/submodules/openstack-helm helm upgrade --install ceilometer ./ceilometer \ --namespace=openstack \ @@ -59,7 +59,7 @@ Without them, metrics can't be stored, so let's verify they exist. The output should include named resource types and some attributes for resources like `instance`, `instance_disk`, `network`, `volume`, etc. -```shell +``` shell kubectl exec -it openstack-admin-client -n openstack -- openstack metric resource-type list ``` @@ -67,7 +67,7 @@ kubectl exec -it openstack-admin-client -n openstack -- openstack metric resourc Confirm that resources are populating in Gnocchi -```shell +``` shell kubectl exec -it openstack-admin-client -n openstack -- openstack metric resource list ``` @@ -75,6 +75,6 @@ kubectl exec -it openstack-admin-client -n openstack -- openstack metric resourc Confirm that metrics can be retrieved from Gnocchi -```shell +``` shell kubectl exec -it openstack-admin-client -n openstack -- openstack metric list ``` diff --git a/docs/openstack-cpu-allocation-ratio.md b/docs/openstack-cpu-allocation-ratio.md index f6fe43b0..2b0080b9 100644 --- a/docs/openstack-cpu-allocation-ratio.md +++ b/docs/openstack-cpu-allocation-ratio.md @@ -26,7 +26,7 @@ Example : No. of Instance per hypervisor (VM) = 60 Overhead on CPU (OCPU) = 8 Formula to calculate CPU allocation ratio: - + CAR = VM * VCPU / (PCPU - OPCU) CAR = 60 * 8 / (72 - 8) = 480/64 @@ -42,7 +42,7 @@ Example : ``` shell No. of CPU dedicated for CPU pinning (RCPUP) : 16 CPU allocation ratio: - + CAR = VM * VCPU / (PCPU - RCPUP - OCPU) CAR = 60 * 8 / (72 - 16 - 8) = 480/48 diff --git a/docs/openstack-floating-ips.md b/docs/openstack-floating-ips.md index 015567a6..37dc0fea 100644 --- a/docs/openstack-floating-ips.md +++ b/docs/openstack-floating-ips.md @@ -5,7 +5,7 @@ To read more about Openstack Floating Ips using the [upstream docs](https://docs #### List and view floating ips ``` shell -$ openstack floating ip list +openstack floating ip list [--network ] [--port ] [--fixed-ip-address ] @@ -18,7 +18,7 @@ $ openstack floating ip list #### Create a floating ip ``` shell -$ openstack floating ip create +openstack floating ip create [--subnet ] [--port ] [--floating-ip-address ] @@ -36,7 +36,7 @@ $ openstack floating ip create ``` shell -$ openstack floating ip delete [ ...] +openstack floating ip delete [ ...] ``` #### Floating ip set @@ -44,7 +44,7 @@ $ openstack floating ip delete [ ...] 
Set floating IP properties ``` shell -$ openstack floating ip set +openstack floating ip set --port [--fixed-ip-address ] @@ -53,13 +53,13 @@ $ openstack floating ip set #### Display floating ip details ``` shell -$ openstack floating ip show +openstack floating ip show ``` #### Unset floating IP Properties ``` shell -$ openstack floating ip unset +openstack floating ip unset --port ``` @@ -71,7 +71,7 @@ You can assign a floating IP address to a project and to an instance. Associate an IP address with an instance in the project, as follows: ``` shell -$ openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS +openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS ``` #### Disassociate floating IP addresses @@ -79,10 +79,10 @@ $ openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS To disassociate a floating IP address from an instance: ``` shell -$ openstack server remove floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS +openstack server remove floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS ``` To remove the floating IP address from a project: ``` shell -$ openstack floating ip delete FLOATING_IP_ADDRESS +openstack floating ip delete FLOATING_IP_ADDRESS ``` diff --git a/docs/openstack-gnocchi.md b/docs/openstack-gnocchi.md index 7f28dbb8..3961d0ee 100644 --- a/docs/openstack-gnocchi.md +++ b/docs/openstack-gnocchi.md @@ -2,7 +2,7 @@ ## Create Secrets -```shell +``` shell kubectl --namespace openstack create secret generic gnocchi-admin \ --type Opaque \ --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" @@ -22,7 +22,7 @@ options for ceph. The below simply creates the expected `ceph-etc` ConfigMap with the ceph.conf needed by Gnocchi to establish a connection to the mon host(s) via the rados client. -```shell +``` shell kubectl apply -n openstack -f - < [ ...] ``` shell openstack server image create myInstance --name myInstanceSnapshot ``` + The above command creates the image myInstance by taking a snapshot of a running server. 4. Use the openstack image list command to check the status until the status is active: @@ -119,7 +120,6 @@ In the new project or cloud environment, use the snapshot to create the new inst openstack server create --flavor m1.tiny --image myInstanceSnapshot myNewInstance ``` - # Launch a server from a volume #### Boot instance from volume @@ -127,26 +127,34 @@ openstack server create --flavor m1.tiny --image myInstanceSnapshot myNewInstanc You can create a bootable volume from an existing image, volume, or snapshot. This procedure shows you how to create a volume from an image and use the volume to boot an instance. 1. List available images, noting the ID of the image that you wish to use. + ``` shell openstack image list ``` + 2. Create a bootable volume from the chosen image. + ``` shell openstack volume create \ --image {Image ID} --size 10 \ test-volume ``` + 3. Create a server, specifying the volume as the boot device. + ``` shell - $ openstack server create \ + openstack server create \ --flavor $FLAVOR --network $NETWORK \ --volume {Volume ID}\ --wait test-server ``` + 4. List volumes once again to ensure the status has changed to in-use and the volume is correctly reporting the attachment. 
+ ``` shell openstack volume list ``` + ``` shell openstack server volume list test-server ``` diff --git a/docs/vault-secrets-operator.md b/docs/vault-secrets-operator.md index 8464d971..8749186f 100644 --- a/docs/vault-secrets-operator.md +++ b/docs/vault-secrets-operator.md @@ -4,139 +4,163 @@ The Vault Secrets Operator (VSO) enables Pods to seamlessly consume Vault secret ## Prerequisites -Before starting the installation, ensure the following prerequisites are met: -- **HashiCorp Vault:** Ensure HashiCorp Vault is installed in the cluster. You can refer [vault.md](https://github.com/rackerlabs/genestack/blob/main/docs/vault.md) for more details. +!!! note "Before starting the installation, ensure the following prerequisites are met" + + **HashiCorp Vault:** Ensure HashiCorp Vault is installed in the cluster. You can refer [vault.md](https://github.com/rackerlabs/genestack/blob/main/docs/vault.md) for more details. ## Installation -- Navigate to the Vault Secrets Operator base directory: - ``` shell - cd kustomize/vault-secrets-operator/base - ``` -- Modify the `values.yaml` file with your desired configurations. Refer to the sample configuration in this directory, already updated for installation. - ``` shell - vi values.yaml - ``` +Navigate to the Vault Secrets Operator base directory: + +``` shell +cd kustomize/vault-secrets-operator/base +``` + +Modify the `values.yaml` file with your desired configurations. Refer to the sample configuration in this directory, already updated for installation. + +``` shell +vi values.yaml +``` + +Perform the installation. -- Perform the installation: - ``` shell - kustomize build . --enable-helm | kubectl apply -f - - ``` +``` shell +kustomize build . --enable-helm | kubectl apply -f - +``` ## Consume secrets from the Vault + After installing the `vault-secrets-operator`, create the necessary resources to consume secrets stored in Vault. ### Connect to the vault -- Create a `VaultConnection` resource to establish a connection to Vault: - ``` - apiVersion: secrets.hashicorp.com/v1beta1 - kind: VaultConnection - metadata: - namespace: openstack - name: vault-connection - spec: - # required configuration - # address to the Vault server. - address: https://vault.vault.svc.cluster.local:8200 - - # optional configuration - # HTTP headers to be included in all Vault requests. - # headers: [] - # TLS server name to use as the SNI host for TLS connections. - # tlsServerName: "" - # skip TLS verification for TLS connections to Vault. - skipTLSVerify: false - # the trusted PEM encoded CA certificate chain stored in a Kubernetes Secret - caCertSecretRef: "vault-ca-secret" - ``` - `vault-ca-secret`: CA certificate used to sign the Vault certificate for internal communication. + +Create a `VaultConnection` resource to establish a connection to Vault. + +``` yaml +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultConnection +metadata: +namespace: openstack +name: vault-connection +spec: +# required configuration +# address to the Vault server. +address: https://vault.vault.svc.cluster.local:8200 + +# optional configuration +# HTTP headers to be included in all Vault requests. +# headers: [] +# TLS server name to use as the SNI host for TLS connections. +# tlsServerName: "" +# skip TLS verification for TLS connections to Vault. 
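# NOTE: leaving this false means the Vault server certificate is verified against the
# CA referenced below; only set it to true in disposable test environments.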
+skipTLSVerify: false +# the trusted PEM encoded CA certificate chain stored in a Kubernetes Secret +caCertSecretRef: "vault-ca-secret" +``` + +`vault-ca-secret`: CA certificate used to sign the Vault certificate for internal communication. ### Authenticate with vault: -- Create a `VaultAuth` resource to authenticate with Vault and access secrets: - ``` - apiVersion: secrets.hashicorp.com/v1beta1 - kind: VaultAuth - metadata: - name: keystone-auth - namespace: openstack - spec: - method: kubernetes - mount: genestack - kubernetes: - role: osh - serviceAccount: default - audiences: - - vault - vaultConnectionRef: vault-connection - ``` + +Create a `VaultAuth` resource to authenticate with Vault and access secrets. + +``` yaml +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: +name: keystone-auth +namespace: openstack +spec: +method: kubernetes +mount: genestack +kubernetes: + role: osh + serviceAccount: default + audiences: + - vault +vaultConnectionRef: vault-connection +``` ### Create Vault static: -- Define a `VaultStaticSecret` resource to fetch a secret from Vault and create a Kubernetes Secret resource: - ``` - apiVersion: secrets.hashicorp.com/v1beta1 - kind: VaultStaticSecret - metadata: - name: keystone-rabbitmq-password - namespace: openstack - spec: - type: kv-v2 - # mount path - mount: 'osh/keystone' +Define a `VaultStaticSecret` resource to fetch a secret from Vault and create a Kubernetes Secret resource. - # path of the secret - path: keystone-rabbitmq-password +``` yaml +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: +name: keystone-rabbitmq-password +namespace: openstack +spec: +type: kv-v2 - # dest k8s secret - destination: - name: keystone-rabbitmq-password - create: true +# mount path +mount: 'osh/keystone' - # static secret refresh interval - refreshAfter: 30s +# path of the secret +path: keystone-rabbitmq-password - # Name of the CRD to authenticate to Vault - vaultAuthRef: keystone-auth - ``` - This `VaultStaticSecret` resource fetches the `keystone-rabbitmq-password` secret from Vault and creates a Kubernetes Secret named `keystone-rabbitmq-password` in the openstack namespace which you can further use in the Genestack running on Kubernetes. -## Example usage: -``` -# From Vault: -/ $ vault kv get osh/keystone/keystone-rabbitmq-password -================ Secret Path ================ -osh/keystone/data/keystone-rabbitmq-password - -======= Metadata ======= -Key Value ---- ----- -created_time 2024-02-21T12:13:20.961200482Z -custom_metadata -deletion_time n/a -destroyed false -version 1 - -====== Data ====== -Key Value ---- ----- -password EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr - -# From Kubernetes: -$ kubectl apply -f vaultconnection.yaml -$ kubectl apply -f vault-auth.yaml -$ kubectl apply -f keystone-rabbitmq-password-vault.yaml - -$ kubectl get secret keystone-rabbitmq-password -n openstack -NAME TYPE DATA AGE -keystone-rabbitmq-password Opaque 2 14h - -$ kubectl get secret keystone-rabbitmq-password -n openstack -o yaml -apiVersion: v1 -data: - _raw: eyJkYXRhIjp7InBhc3N3b3JkIjoiRUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpyciJ9LCJtZXRhZGF0YSI6eyJjcmVhdGVkX3RpbWUiOiIyMDI0LTAyLTIxVDEyOjEzOjIwLjk2MTIwMDQ4MloiLCJjdXN0b21fbWV0YWRhdGEiOm51bGwsImRlbGV0aW9uX3RpbWUiOiIiLCJkZXN0cm95ZWQiOmZhbHNlLCJ2ZXJzaW9uIjoxfX0= - password: RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg== -kind: Secret -[...] 
- -$ echo "RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg==" |base64 -d -EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr +# dest k8s secret +destination: + name: keystone-rabbitmq-password + create: true + +# static secret refresh interval +refreshAfter: 30s + +# Name of the CRD to authenticate to Vault +vaultAuthRef: keystone-auth ``` + +This `VaultStaticSecret` resource fetches the `keystone-rabbitmq-password` secret from Vault and creates a Kubernetes Secret named `keystone-rabbitmq-password` in the openstack namespace which you can further use in the Genestack running on Kubernetes. + +!!! example "Example usage workflow" + + ``` shell + # From Vault: + vault kv get osh/keystone/keystone-rabbitmq-password + ================ Secret Path ================ + osh/keystone/data/keystone-rabbitmq-password + + ======= Metadata ======= + Key Value + --- ----- + created_time 2024-02-21T12:13:20.961200482Z + custom_metadata + deletion_time n/a + destroyed false + version 1 + + ====== Data ====== + Key Value + --- ----- + password EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr + ``` + + Apply the reuired configuration files. + + ``` shell + # From Kubernetes: + kubectl apply -f vaultconnection.yaml + kubectl apply -f vault-auth.yaml + kubectl apply -f keystone-rabbitmq-password-vault.yaml + ``` + + Return the secret in YAML + + ``` shell + kubectl get secret keystone-rabbitmq-password -n openstack -o yaml + apiVersion: v1 + data: + _raw: eyJkYXRhIjp7InBhc3N3b3JkIjoiRUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpyciJ9LCJtZXRhZGF0YSI6eyJjcmVhdGVkX3 RpbWUiOiIyMDI0LTAyLTIxVDEyOjEzOjIwLjk2MTIwMDQ4MloiLCJjdXN0b21fbWV0YWRhdGEiOm51bGwsImRlbGV0aW9uX3RpbWUiOiIiLCJkZXN0cm95ZWQiOmZhbHNlLCJ2ZXJzaW9uIjox fX0= + password: RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg== + kind: Secret + [...] + ``` + + Check the return password. + + ``` shell + echo "RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg==" | base64 -d + EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr + ``` diff --git a/docs/vault.md b/docs/vault.md index 62b03496..dc911161 100644 --- a/docs/vault.md +++ b/docs/vault.md @@ -78,7 +78,7 @@ On each Vault pod (vault-1, vault-2), use any of the 2 unseal keys obtained duri ``` shell kubectl exec -it vault-1 -n vault -- vault operator unseal ``` -```shell +``` shell kubectl exec -it vault-2 -n vault -- vault operator unseal ``` From 70c7f267a2e7e884fe0329613a602f8b415ac9d5 Mon Sep 17 00:00:00 2001 From: Sulochan Acharya Date: Wed, 20 Mar 2024 13:15:43 +0545 Subject: [PATCH 14/20] Adds Gateway API controller - nginx-gateway-fabric (#108) * Adds Gateway API controller - nginx-gateway-fabric In order to move from ingress to gateway api we need to first need to add the gateway api controller and associated crds. This commit adds the nginx-gateway-fabric controller. 
JIRA: OSPC-298 * Fix Gateway API docs --- .gitmodules | 3 + docs/assets/images/flexingress.png | Bin 0 -> 48950 bytes docs/infrastructure-gateway-api.md | 121 ++++++++++++++++++ .../nginx-gateway-fabric/helm-overrides.yaml | 117 +++++++++++++++++ mkdocs.yml | 1 + submodules/nginx-gateway-fabric | 1 + 6 files changed, 243 insertions(+) create mode 100644 docs/assets/images/flexingress.png create mode 100644 docs/infrastructure-gateway-api.md create mode 100644 helm-configs/nginx-gateway-fabric/helm-overrides.yaml create mode 160000 submodules/nginx-gateway-fabric diff --git a/.gitmodules b/.gitmodules index 0bf31b97..64a92954 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,3 +13,6 @@ [submodule "submodules/openstack-exporter"] path = submodules/openstack-exporter url = https://github.com/openstack-exporter/helm-charts +[submodule "submodules/nginx-gateway-fabric"] + path = submodules/nginx-gateway-fabric + url = https://github.com/nginxinc/nginx-gateway-fabric.git diff --git a/docs/assets/images/flexingress.png b/docs/assets/images/flexingress.png new file mode 100644 index 0000000000000000000000000000000000000000..68fa6baf21ae021e75959ee57e440daf024a4171 GIT binary patch literal 48950 zcmeFY1$`VBn7?N}76@EU_^lpU;J(`AD+OQ{Xb+|1R+Ta9vHR3GRD*W{1^bF_-p1aVViP4&NC^RN~M!cp9Dxe^6p3 zf~V+9`x|T)Z`pt%U((@r8`_|3VkQoTYA-UF9Nw@mWN(9~!!^$vwz%ONn&GcR0e`gc z55e_Ai2cYN;3v)RcUv?Tqrd^HVH5G>Ogssi+Y7i#k+2QQfZuM1%K~2?aUH3*h+ULV>vxG3(*quL`eZPqE!Ty{?gq^kB8Y(cm8T!86Ke2^U@itf+*>QXo%fq;v#F z8BN_$Vo2#x$-+839?ucQ?y4WV$NoZLZQ#A_~5Y6NsKW_N@w z3cmrQ7|)D>fI*u*>~X^tcDrmiOE7FHm$%X;zv0t1N_O_oY59qw91G}ZIG6ycmW@a* zBSV?^Qp+x5pJahlE~@s=+UupIPHeS|(AFr$PEPD?nPC{tWRB<%ZJ9$Dg! zuoimaGRZ`^gI(&%9>jh#F*0LAmt0^(#x6>^h^-D9;Nl-hXZdD)kzk2*V7>m~u1m)X z8-h0Q9HpcE!+s@<{pXbahjJ3MxD8=P^lw-CkGTF<#x9?M77Q9NAuJ!q?*so7!nX3) zM=2l66PIF7e-dc|j#^5TGMOfno2-8=m8D~rj|}}w7y4Hgg$Jhy0{v0L{`oUyy-Vc! zU-kTp;!iH!^uI~`Ka+i2X1ON(=SV;CugVr?Z9ei4#r$t?DGH{-PjGf!Q`@)7)OTB?F)io zFFdJH|QBOb!Rve{! zN%#mCzTqSF;*(+~l>%LqP63NaB1oi2l%n+O@k$~pj-qi>FcAzY114^gOte^&VPP<2 z^Cev(l{M}U;DZ*Q)~nGtEwBfljzf;P^g&!C2*bH$Zf)4X=SG}Fr%R%Oo;+zhU}XBV zny5z!z=!QoxFcGk(1dMV5mvcbGPRuG6|2n-ydWU4E5dx1(&Dq()d<@VqUzilc&7B@ z6?zy!8`m4siRnp%k}yY<60g7)4e_NBDxONR*o1(W*$$pXnb0Uy@Kb3KVFbg+I7;{; zX@zkqI~a{4K^9-^j$vdr@eq;3O(baoKo$-SWvG+z!%p+blvZL)=42(MA*Bze;Ss$& zPn-+A*cu=FfldLgQAfs;6fs9IV)UtbL^X*=;wU&4iUV6P6({BKq^L4{U?Z zj~Y288U-sOdC3F~f#)))$#yf#L*fP#2Aa)Il_ppQs~S2p)QFXdvy!a(WYo+G;O(Zc zQRng~<4h`*ZA(y$Qlr7bHtIdbWSjzmMH0KC4uM5XwCOBLk|FHJYb5pv4CK>LX*#pP z3P6tSS5sKguv;BcN&F%n-W(;1{Z_L$*8{gY%Z!R>987=WM70w3^N>5k}1xW zd!V`jL? z;!@b`n9RfAabs3t$Pf{Qam*wVc1A)H8=p#};wVfnUZ;=4=rI*pFAum;svZ+Nj*4at z0WKyD`fXm417|ns?6CmF=Mni$VG$qY2i;P0m~XMFgkh&L zN;C&;Cb}vHV+k!55uVD93UOX7->JcbRKU@ZFukTKou;^pYvoeyT%JNg4Y_bmge79? 
zO^8@UNygdMgcyyh0y^jm?v$QXV^U!tIrKghezC&}1L(di&U86s3S+=XAqRy9xtz?P zD@BMi?(xVKPUt3bb0t<%$gUSllLi=zjn{~1Ti6jbCYK6kR7Igu!ZsmOZ*=1*B)T`C zw@0OHuS17R#)4E6Vu3}_Wjc$SppH;Envk9la{I(Q7adcU1pG6LM2>_>r8i2k)p5LR zagr*Ky8;P@h!PISV&0%uLBM$h)*wfuq*7GabEJsPt=c3vP6y~p4@#*VMC-L7Hm5U=V3Nb38030^UZh3z3bssa4w22FgcX!i zZNzcB&;bRD%l3)rCbC5YHi({x*v$k6>JfSHW{i4XvWjhU=~%%aNgt$Xbu2mbm&n1y z&{ZP2O~vQw$yAzyndExZv?$Awbb7TWxx(N~B!Y~HRHt+q<9m@ zA!CM#nuJ2{jv(rwI~KCi)Y`CIoM4(lL`1;$5rhW3IS@$3EEwU00*OI~laq}^%t-m= z90uPlazq4VNVd>@3O+Yra3?J+9$Cj{+9-4ena#jC1&SoWV9^H6hNxYI=wgO|Lx#h} z*(y9!97>S946lNfBt?7>T#(7)u#grc^K=5bQ|aWGqk5@LhDUWOGTVvhDD)spZpVx< zOUkn8IBYft$BS^hJT{Tdh{`24oP{DMDU@_KW+RkrmL4J|oS{#QBp+ zfk9`ad6Qf{St_Dw@Mb4FB!-cpd(1_(=oQ|W%cr4`*m8+Vn=~g;4GVRkD(GQ1NDVG2 zU5Kd`A<9uWXjUpc!basvJ{gI4QLe_K6w?BJaYDl3$O$SjK~CfQ3}n<8GZ68*m@n$E z+Yz@)EtT=f6oX#| z;n_jG-|S}xIeZ7fV^@dWWC@Uptq~YZayR$!r%> zsuc#4W&_h})RQnXg*~UFcw!vBOlbl3a!{|=hCDayqnEX;t6OV%2vi@@}QC%)+r)-Opz%J zwiGNUEZ=FCg*hQ5-Dby!{S+G4#7IUp0Uyz%m$^BFFw-NJDNIJDS55`b$d_6bDyfxC z)3|U(flh@1GI5j>@$)fqiG>VNKrUe$0?{}c3DaD3tyimZVYY{@H_{!h7%}4Eah)Es z5VJlM?1m0?I$fMt0wqXzHmS&JmcuQ&%^Q$K@jNwK4YJB~n-~NPwZx+shvFEOm`n@g zJlR@j5Y@UFh)p7bMv!N=IK=S9!aO}DHac9CM|X!JRFl&l@%mMIeke(@DosHzMPooT znC0f-Srh`EPLRq8K_)v4gT!14D+6Kh^cs557{H{$B4tW(BAwh3)6#TqzN%!`5+jL) ztu(|^V6q(z$D)4t222o(`01!jAS1W}CFWCu5eZQU#i5YhXlC#&5;P<=kb^Fc%mq&* zd`yi)8;}ZJ)EL>WrxWBVlpnWPNwEMT=9BRTlt9M%aZLt=(i!3z-7-7H9c5z0;rxF zRnmc4bbKHz4)81-HOV23J1Aa0I}(u@B>=!95uZX8w-Ho9BxH3059p+rmlwk3v1y_b zmw~6_(Y0~#>TDIAry|I?Q6E=k6mh~1Hr}j?FobfqKhBDh;Q_BqK)|$yDzdO3V~f(` z)(AI<#PkLcU*u=8O>T)-Di8_G2BKLCL4`*wci9~l1xst;`n6VVA{NsSd}6tq=^(mz zG)AS zNt&c|zK$nB%&{Z`MVJCBrX0q&*p*NQB=n%b$-y(DT#80+bcEFQpLg3K>gO6Y9Bn{`krc|r3aZ*3(l|&$8qPtFDb!-E%S%+#g!&-IERu-W zDiu?1R7D69GeJ!y;Z;#rI3$p|8E!$?gz9Z7icn%RhT~F&mgqvXA~F%@qYA^W7%zaA zNSOwzOri^Tg~X(iL#9yh3Zp#iVX*`b8!cqgvIUqPAyF`SWU^U^N=bST$ft^~Lh(W+ zFCK8%d{jK9Zz3Zrpt741Hf}hqAxf=bX^0+FIS`HBPG|5U5tZ9zrrU@?5+jZwf&h~A zhm)qHmSkfG{5)bL$>Ek39ZO8Bmj8t^DHVAT1daj!!a(W@qj*kAOt6tuch?isSK4Nz#WXqlm()N+MJH&j=siWVliVFg49Z^LKYnVDVm7CCcSX`OK zVvxFpmQXZeX9lG)wL2Jz(#;ZzEts?lg$AWZNtH`D%pfb^w}lmSC&?NPtH`L0!-qjV zm`P9AJuZYoQ8|)aKk62SJOn^mB9JIT#OX9U#f=-h;f@6hjG#)!@GHE2Jn@kgh z;9fvuGT3z#AqmF_ghULdBO*a58cdm_c%8_lWwA8mL_`DZCR^mxc$nu2sMKtIjEUk+ ztQY|a@PoJ@P39+4XhN04L5!HK6g7rW#1XzH6sC!(Y=g^_ka;N#Q_>#9nQ$&gOpJzv zG8xP7Fld=ryNxJ+gUPx9Cpb~MiC zSk(k>0xuC*%`|)fda|P`5dtz431O3Ky-cUY?+g<41QQa{kcmGM6aSQGIj^5{m=H*dm?C z&WLd6ZlN_Kv@=v;gd-$GXv7d2Fi30;n@p=rcod=lJHUgL2&gf0kc35J-dL2UnWIWCWRrZFf4Y9*GXMvrShvtQ+kMd^Cb zWD&p+S(pvvp?f4;f=95jbr8arnA{|x_@Xoo$1W$4-0>ux ztnm?TjIiH=i4Kk;w&^_re;jqhEGSqtDwU?!v6)Q274y;z3oXvIBuoT{A1B8Au#BpZ zXho2?r;`Z&h>ag5<2?#u%o&S>LTr_j^_nbXlMqkC(R~p= z&I*j83p5;(Mn_jsj7D`7CDREA8E@ry5=I%~7fQ@rL={jXIG%wa4Rd|8Foo^5heTwj z%wwR7!VI=1s3L>b&^<{dM&1}^mt}SpTZ^iN)F^>w0H+^dlgyD=L}pKtTrv+`4DL)o zqH@WOxI@nf5Y_4^6%!c_FBBun_&OHZp|m1)xjjNxd3^#uo`hg=4H@t-hv={*acrqC z9CxbRVa&O+Y38UqVB_Gej+j2IiQBBoh|%N_xg|~<+zupc&JfD58~A1y%jEI-VhWsx zrG;jk+|JsKPAqs##2) zDIWInS$tYZ7$EpWQE8Oxa!J%gQ%ol&`28UT#12@5prTE#I+!jCeQei6>ERXfb6GD$WDl6lZ9Opy&30~%V>b&giIdMBxjg7 zp%^_u&=Ry^E~*ge@e#F%%TgK{0g=T=z+}Lv6%o7|vng)jp`kdH#drBW>_2nj-3A&$)8#33t6wrVUqmDu4Bx|19?q_n^m zVKyH=h*huQCX+fR0!^qlB=vGxb`e{tljB`HgrN2-lTyAl>~LGHUX1D0T{spP<=POd7?3$dfETCGLf+8Xg(~ai)T3mZ_{(F5e1|L_8*$A(4>F z6;c7$q9zbIJQ2YVRB{Ap(y9?F!0t`diHU~MNAaVAu#8VvsAI%rfUTjjh(>-g0%A!Qb6t^8 zI4lqaWJ$Kkpby6J7L8VEO@!?dSyb#3a4bXznS+l`&+boPFH5^OOt)P13UbTfRR>?8E z>p`SqvX-G@834esc`~Mn8zAF=b9N4(cbCLy6ykzzv(GJw!O5kRqQ&ztvWGlsoP$bW zJ5692*=8=uPmm_{VSxEMf(`O3P7$DVdJJbm$r@A}lZr)doId8EBGRzVjS$FEr(Pta 
zTAg;Unwc~yv6>D#O7yw7Hj3LJPKMlWcU10EF_o^6kbt95$?hmBVv<&z zB}XHZgfyy{jW1J?R7T8A3lU2^C=FASQjeXe)mv;-z~JewMm=8Ko0Bam}xQ^S$2w@j}zd63e1MODQG~?mnWn;8Qt%c@{CHA z+R3(pCt@pEc!AN1vobWo7;H35_u#@}oQKY#aOGO5iEGn4=yaSq=B2m*tD4DdJr_$g z#T_JpPcOIHctT-B9gl$b5tyX`Vbm7$MO0KG8)CvJpCD#(Vp67qN&#aRV97KDg;Hy_ zF=Am!QWdraNdmnXrI_$>pMawCaKc(!)a>(Vp^^xttG#L^LEsiDAmhf1i_Jd2P-CUj zz}^O2CPD}crL6LpgRS%%!lYo#5f^xZ#t6*?z%&p=I2IZpRklV&@)=QL$it^etwwg3 zjEe$Lr|T6Vjw3|pxfNFVtY|eL_-rrxO&WFT6G2+UZzE&f-o(}m0KlVk0u}%@jXNmmrXa@>6WluWmWUM zViaN~x=CWUP@I@Wv&OXMgiYS+;$S?Ef9%89gCqPD3EgKI07QiAmo!HInwM*$}dSEJgaP>Jof;iBy@`5V4*Z@yn$lR8q22_7rvl)Zzc2tOYAT_!mg_}{wzWNetQDh(|=`8$~j;#g|YgO|AbmbV!2^1FZ?Rklg~I%-t;*G${#D&;D0~; z%S$dlQ;fe;hLFn=HevO;SZR$PD}l2FSy3o&#VRtf@=@SWX+bX5;iIv~E%)>vk+T$>rv3h8F&=PVa4aSn%=#N5NQcp~zw;^IU5UaO^K3MT-iA0xR zkaEfV2f|v`RJM=*R_1@KDlKO|86Y%9zRx)S(XxMTZSa3`-v0rN|9dxF%J?8u0fw;h z^b$Awzx@BaaGp@^wafVbkHQC}+_U_L^B+RT|5bd9pMU%LQ!Q}W8xsV0?W#;+%2gkS z&%}~C|3kIEY~}xpHU5*a#z#}~(cF-*H%|U6bN^$o2BftAWO7OyK7$tJFz-Jo!a&M_ z`A064IrGn){{N5&qYSNlruF}|&nK1C{TqXAP?8Npdw<8O|KFlOwEW$oa)BWKV^{D$ z0|=mHe(f_Q{QvM3It<~Lzd!e{-XVfGGngoS?+$`6cn-VPVZm@F_2c!Y*N?EfOpHZX zm{HPKY6&{vRY)vY{dc{RS3>N+l8gKk4*AP@P7KcctJLFPc@eFo&wugGZg~dr)7#OX z@2KQ%`TN+VH_9Hs-iI&Eik`V{YpA*jMy%>Vw%_OwoF>EA*>t+Ex7Ko58PdHFRIx zShH*NL0)#HD$;dkUcJuxQ|?Y=O6<`k>z*x78=+=gmy~{KJ2TMq+7(Mbx9cvbG-6`y zZ<#;$-QjH7Iy`O8w~>Ps@tvO3_>-1@JiIvCxNqD-nEFTU+Ey#ntHvGt{+q5thGjP0 zeScrZj=uNLtlBfGW|JrHUtVoe|L}z5yQlkQ3+Z1>>!8RVNg+-v-4y&?RQ17?9ZfE7 z^6h`T>w~vxX4b>xz2(cEJU=P9ao#qccm?kxt*&S48hf;No#oJ);4S{5rT0(&H2T}f zy#?x3`rp25GoJi-bYW&ntCl67{v-4Djc>TtakA~emzQJOZP8WKTgkgiW;}UzsLP_> zTY`P}A9d|Lck9p$WWXQpg84nJZbqAsy*aSbAM_iXeXI1T+n{Xw>HenK^M@2S`bNDn z4>%)AJJH0qJ-b0?eM`ks;lw6;$979>&oI?E*RbEf>_&KN#+t*kh>XiyBXh?v`;0GR zT3Y;@>=*aXnjT%+lAnEYYsB@7>+F#mrh+VW&MMvd%5~d`3A+cRo?j|z(u|h89~)KJ zxojVqZSxit+&#Ih$>=Y;Jl*m3(Kh?B1=-_mhKv5@?eg3I%AU~JV%&XYz~gSeI8Ptk z`2COHf23#HUPk{jD|NXz!?3ZP?_b#P)BdsOq;yl(#HLws+{9*GM?Jc{ZT1cB%uf$c z##NZqg)_0~*mtjs7D?uJZyp+WH;Uxvy_?jdI72YK6Vk71=pG+;R)s#}(adPSDZ@w<)OUo2-yV?yJjM_Qq z?W~?3uJ0MW$a8U{xCMS;#Y?U7CpPaku2%O~6>9w41Tl^{q1fzsOI%a7z*ppoOZVM5 zzG(E8))Z?&M!injW4C(t&dj$Snd3+)x|@Fg^0rNf8m_F!DqkA&+Taw%s2WIfwDOx> zuM1~qesNQdoY^^Ofa2g#+XnubdnX2+>0c%-EmPEK#{~K3BGZ*~P|LVO@h!}phv(Pt zZ{624MWV?(VyQ=*{E<*cYcD9W@1NML?e&3oPb`_cGjrK^2IvK>j$+X)%+3uX=+{J=Yrd&DmHX(&-Z|kF@e%%d1H6T&VZr-swHZ z7Bc3dIf}jKx<1}+a`H3#Za+7z`Q&oL0qfsXr~lY|P2a@6Q=8v`Vl~@YWAu2W!SkKr zyLHqnwb!2 z^s^wyzwd7`MER!LjyWHBIhZxn(ZzXY<=6vL+qLU}xPRD}A!w>vrLSMl%)fI_^LSo^ zZ^}vdq(@4Pl7uO7Y9h^PGy*^_$$MW9MX`L~=@qJP=lxRkr^X-o{OX&E%{4n; z{XU)o79X`7G}j+4GUi0uDHe0H5mUbQp|s{li@?mgT6N%)-BmUZdb?5d;+s|D*7h6n z{`8OA_KdFewdcYw#Yfcl8-Aoo#h+4t{q}o3TgUzC$#pQ3v|+Wn zpTg%mZ?5XKiM`%>_(2ohs!ztDXDTlCsCPjwQVGodFa2_ke{*J;q=n_e+3~H*Wus*Z zuVr4ryWv%8iH~)izQDF}@cTFMha1iwTcP@?b&b`WPlM&{s#-9MSiO3~%)R8R1M7Af z6W^ENlkR+)-=whe65%JBGg~@RvNA`Lv%zsPzjfl_@kN{y>@z1nalU0r z2G~DdO4mVeo>#GFwam};^Z-5F?7yC}&U~QNz|VW~TKk{=&N3gKRewfipJdB#K2$Qg zFKrH-TWcD#Vwx{ z9qqBmw148c4X(4pjqVS{?|;;+ZVrlGIAY=HC7-4rQ=#h3qw`z1&i>3ATc=Hp$piLJ z+s@+ANS1^3rquZ~b-KS!Hh`2SUo{B*^lV*R=e(->%}-8K%zs%2Pd8 zcU~UL_{oL!deSuI>W~lJUfo{4Y#utqjB)B3Kc#+0C2mIBZ!|v-BwKTy8b^QG?AK-U z2j4rGuy?{9@;Aw@SgG=YAs>qGoLxQtb;lCZqp1_WJvSy@%uTOLw(iw}1WT8Rn_+(b z`=Vpol7UGn&vLfZ+)){zN(G$jOeYNL(q}^HkW|mtF@)`kX`5RweCEvM|*&|M0G;){(hgNB^W*{rj0!y~a$O$oBhj zCzgs_+na!mL3O9c*Z*b|r^TRC^@dL$o3|$obmE=2Knr%S^CI>HItV4RL}}lR8k%Jq zy#92wGoe+hU-wFfynk`_+#191bbZ0JBXwI>TQ!FoY6TXT`+djyDU~V(ZXanjV&ur| zTORS;Z!HI_$@8Add-v>FVTES3w_{dTQ31Yv@6Buagk+|x?Mj3kL^rLy{+AyuFf;qW zp)wPi*P^O4OEeSx0`p1E(-usojjN8eYrE(RlC3FgkjI9 
zg`n)$R&Bg_VCpva-RF6t`Q4joe(t?{+vA<%qYp1^al32Yq^8FA9$xf#=z;|;t5-^& z`GvY|nV#Us(~~lW zWKtHXQSE&cLR<-b}d7jOF#+@(ccE&SlzaT8_W+lQMnQ1h5T4j@tPjC)yD+ngTh zeVel5<}irlm6TI;T#v6?8s#nRI7!;6~4ddmdDrl=W=P#X`fOyoQ;*ad*G`do=w9jr3mLI&-^$HNL@%=?+tV zPNTy15%4{2^AGnm9r*<;;jE_iv#a}6xiK|f<6!jxx^-q`IjMKTvHQyFw}Jsf-j4vhvN_Op zVzXIyyFm1f|HXIpiuuWHWHEjMYh~hT_G@lp`2vVhjx;^eXwVExb#r}Zik;d@lPE*d_#T9*mfG5b%4r@CE z!c};y$*akCXMiV#@R12Y=b+e>=4;Sm`kvf%UsP|9+hoqVy}(gHQ*yU9C3<&lVJemY z2)zZ(9n}0=#?@I6AFdZ(m|XuG^#ah@of{|(SFw!+tOdkUFAp+|TYJ~H>;0(9+hQhg z#1w$5VB1C(-9Gv?fl!a^yF%EqDf!}_Wt{g=Z$|$E-?dr#>f7;iH9x5j6NkS!*1nFZ z`!6%~<1n>-)hZhh;Ose?f4};5 zs}{`ddbS?r>N-pHXSWXBep5C6bPVX#_Qb5MB_pBP@Z79xe(NZk{AA^tjdM-7=`W@v z+Ga0gjGonc8$g$r1yz%GR<_R13Y!R>F<lm9jMc`C9-rAqf`kksoGtWH?_h1GQn|Tv%7cAo6S>J&cX$6a^2_F?Cw_g$#=3g z`PUFPxJ9iIb{O^S!rRe?f z^*Rjevg=(PaJZp|d5sURN*xDKK=qS4yPpMiSfzA_mze!Qo!X8WaAS*lR=5;m)jz#Buv;XOP^v%SBX3qD-1O6-stJkR0kT#zY$C@H^Knz(U ztN7Io@4T^ZM@`Df*$WZTl#Z&7xXX*a{eKF;mg%SE0mzYqN?GC7wUs8XMwGn+aq_v*Qb7z?KyRD`&Dgc3~-#=HKa#j z!Az6MSCza7XKXnD?BHW!N9B)`@6KMbFn(iA;70zJ-z?o>JKB8e(Zj~ItrL&NJ{%e_ zVz>ryPTh+m`*mxF6lyN6+E)@b3`l$T$3=?FoWbeG{48LuczNE8pO?c{jPy48{_@8MyVhrqalU!1;o-%$5fSN$WkF7 zzMuVC+G*dIwGh4h4vE$(T?S6;-0RHVo@! za6rA6)duxm)?c(nQP{|JabxE%n`hlQ6JMHJRh}a|C(YWM(WPWb} zYcxVLrfe|msa&H)_e)K8AK$YNmEdhh_CCD0>G-N%$O_(fe8av)>~TmnQuCtVmypw>?H=~U?iH0GKf8CiVp;9<^z4)`s$JXh z^2#35q;A&2K=Hf8rZH=e_V{pVjN)gWwJU&=i@N1lv^gmqLX5LM`?l!A$*6@d%*E|~ zG5jjI)xL~M1*Y-slHX_CuJ?7m_S~E+nU$|7o*cV4?Q71Lp$ETFx>Q?_Iiz_#-al6m zd{?rLw9nz)pgjNi=_`Eq@g;et5w`h*&a^(T1Q=6uprpgSI(1A~wj7}58rRv(qJqV| zsZSnld%U%GPGDfNhHmtCKS=*}Ws_|MFPiXc)#)b6TCuplf7$t<@cbGBWBZ$XKh+XW z9kvPp|FwV#z}n@&Q&iKx<(!Ra1@lNOQo}3pyNPl>BbIH(*t^;4Kp>BK!E6W)(erzLCZ!Lt7kNR|6aHrRn zV4DV+z3p$tQXk#)9IaNrV+O*O?f-Bzjy9fDbvDse7)0`KoXG66^%}$=_<4+EBg;m> zKkFw<)tAODl*tvoXynrX=&1{VMy&tQjWNskj2oBQZ-uSYtvhwY;P=TJ*MDF8)udh> zek?pXzm|2jZF{}5-*-?frA%G0APGUnTKn-<1pBc(^Wxiqwv#${SYT|S2xg62zIx}I zgYk=Z>JNB*YsTR(hCxD9p¥{lkzq&+oExz;#?mYcYwtgRF^*MkQ5ZmVi3Vr-O0tXR?YYM7m7B*VM zH94y%uiY7I>eOTcF9?yc%Y111Vcb0|hx<(2mZcGpG_k|5i z#+;{?2gAvf7H6uQ?6szk-@j%od3>|c`LEE%JIpVeOv<{uY7wO<^+s~$!4bozF8|hC z6~j3fzbeH!>8bG?&gwe8TgV^d_GdruzGeN&Nt~fipYfj5N&TYzjo-{qvEH+x_q9z} zXouf4Z_CyU!M==6$uZ_DlA&vTo!;+Vq&`19wRQe;HHJSsS3JWrf7veodR%^(x$E-e z51r?1_$qPoyQlIRDYs>F3$|75z1d&!^iLY{RhQ8G&3HjiqtG`348YYopk_rI=k-{b zP#&10+s%)`8#G95~=s)zxBSLBCkv z`z2>kwZ6(5@nff#du}dAueMv`Jl(Nw*ZdU~*KNbL*0yBQw9OOpFepFp9K>1IH&X5W z&(@82iS2QR*;RxfYZ7uAKj)-ylU?!RFuZ)#fu z;DqA!cSf&N8ilK~OZE*0`=$4NE@E0U=xr1Ot8U79%p1D$wpoy6%G~!puRjD7gfBMhj>-y$q+UtfZiXG!+?-Yvo0?JdlH3#WzdhQv zJevEK^Z|@cd&SZka8hw+C(mw%^ND2j1_Ikx;UQ!QdWTf{WiTda{z@f>XM4w_7v}N{ z=ckJ2WsQY!H~ZI1S4GBxt5++m>WAshrJmrc0dJ_b=QcSv;KbyD&M`<`EEdc-GIi;# zXIlW(G~H6UG%1;zl|ZA9uhbm;{p&6j^K+iE+xSkuSwdmmdwfS|Md5L+F$JHudt4gD{(NXyWXrnfG<3{ zZS{unL+4|t_T`QWyFL_my|BsW8(jO)yQz6|yT&F%+HLB)v!8bGE_WzkRA15SN)*W2 zrB$!aSfN~giJU&+0V$>4P008tCh!4oJ#2O=4_4T`XU}=aOYg7vqgj<2RP{k#Y(Tpo zj$fu@nYO~Vw_ZKHaT(p&u4c!hx$Pc~DlCZ}1~Z3}J3!0^yS)AO_=*juGO)a4-jA|H z(rr=CuFSR~1YowId z0*r6iJs}JWV_F84lJsAq~AL8*r(ogf7oy2Pq<8K=WDDr7R-(cLJ&RGuGphh-X#;H zg?5i_G1%_ybx=^a^RTV6EPr61Ex|7!xs?ZpXMvl)rN4V|wjZS8XYeldT5lDt*?U^h zWygp*ZTOpg>-tjG?agiIowc;&M6#HE4WzEWQJeo#HVx7wRkO3RdHUj7J>Ct4%w*%o zhXL1ocWOqL9>Rqz8S6@xS2(q8P1O9`&`!E_Ex~8Bmo6B3AYHRH>`<=MrgxApeqw1V z?lXIDuHxy65M~BySLD=wb$8i(A?CZq+BOpCubt&weTsZrBCsMa(iiyM5G`MSi!tLF2~NlBY3~QH`39J}ttId4Tf7^^0*g z+GKa`wULrIbLH&&HHJM;ZcOSiYkPn6HaMtIcT-9BaUqsQWRUao9ZxeR%v%QsV)X~S zd_??xe5(TE+Lmz0^V`&RwZNiTvijBYK-t2O1-H-k9Z(a}9Qkmj>uGxWIcFejDDa4;--QJ4^6H#F3KrhsnH!ITKx5ec zY3SM7u%4Q>31x|dg{kqT8c=)pg}x!(<&@pZj5kKa6;krC<^DswWBaE075 
zcFJ#j{p{9{y|=8Za^x6Q@dk)f@KD+I0+_R(LX~B!Yqf!VK|_crOHS>JIsN~5aGpKw znBoseM!d;tXg%aRzivr~BUhj@Vt&8X%QB_L0!55nRC>PSuU@e^yaIjYQq!|*f7`HH z4|vhH=WsSKveM--)p10`+AbE&zQZrn;y-uKXe7f6=R>J~%EG8lb-T+~&qgKAGyqR}X0V-9O6%tQ{^!?OGKkK4KP+1G z^hnpoxtmeRM#VDm_3>SH=0V_KeKvaSv8_FpK74Ys+qjvaCgca-6_$GVYe=g4=ib=q z_wZY}+WYHt>#a){f43~-J`Ww<{eEoci!;3nDeJ;_Ps~HR9sasGmfPuE$^d3d0A2&B zaFnFjyq!L>njPXf>^S!3;o0Q$9c>;?!44n5zAbo}2^xz$gpiekFiL@o@Z*85~7&VAJQ<_Zq*)aXj zIHWq!cFD4C;~+-jct*E{QW!W9gxnSXLgV{})C{}C^d)ZQ2MAS)c6B>D4t!)W2YmaE zYV&#y<$Ra5ZZX7QGTO^|YdU1k8@@Mpe71{HY7NuJh!#L9r0tMK6D;FK)o9eJ=U{eE zW5H2U??WXP9y^PCKF$Is!<&3pNmKb#T78k(dvnXjxwqX@1Xl3&bqX4nYhLr#wQtnl z7P>w0HI72jUO{>cl(o)(=Rdi@Tsqgwx?>*JU&P5_*P6NA^yq36%~6=?^j_k;Qb(Ou zVS&aC>GJ$0)2Siro)LF5@3b*BM>x|_u!u|ae8j*@Pfa2e1#oGpbA>G zKTx*DnW>9UukP0Ztl;joru})H-gdOz{JB~1;pRG~pKq-?j+wYIfQ%Bytgo&sv6Hnr zTTfK3kD0@5k@Y-YY%D&l(Ecsp1~iWEm(h`&DConMLBlHg{dEG#2@A zWyq=$#K+95l*Y|TzRL)G<*r$p?Qfa#6cee(FCojj z>t(;6N{cR@Q~)JyzNV^o&al0?O|HA_Wunv)%>0MJwSX`KK5R&)KXIjO72Q31@v&_D zjEtfmANpUL-^p`WY<8>m?SH$qhPVLOZo?8IM=#VGGH=4(+#Zj+Uu$T509nXKkrr=H>o~ay?RDtu&3nI27*7*IL{1PK8Nk?uZw^!J|gUf=ot^19@inftkO@3q%j z_w=PWrdq#aBkuPhN(^`F+2eM)^oWf=a3olFb(&c$FDVr%G@XNv6q4Tf$tKB#0Z8Eq zaHTA#TO#gHHq?LP0TS6oEYYVB_P@*5ZoVHiy)54}MzH27$;a1wv~K;e_JPC=4>|%> zt6WqH6>ap?!@M}_0|J(g%x|TB?}QuKUtT&oIlj1;ivU6-Jy4QDAQqUeozE=ydU^`T z+jQly>1S+V28!;ZN9j_+b&r>?4V9S10)3Qf6$Pk+ zHf$f0o8^CCR|nLT`vdvW%*oCZ%g|fX86Fd^$^n^!7ea?CUCUii6A$H0>Sm2s@0v3N z6H>}!wCahB8xq+qdX4K+VWm(yXP(Jld1mpSG!F}nE z{}4U>9+WAOaF&FGg-t^g*#^~C3(_#igs5RdQY_FeKD59v-;ZpfH1%xG7ya;&xK$sL z(7q~1qo-=EdaWZ}Ns~-&oj2v_Bz_WU2zihYg(p^WF%PW8#&P>lq|xih5r@Y@V*(l} zCD}1pV7HD3eO9#)S%sFuNfUO?9Jz5cR|`ibl`|n~r2(oH;s*;ku0#I6zLGNs?inO= z8}Rw=xiE+2Yi7k@BYdS;I`g#ZNq_1sHAz6D=T%pAq5=F?W9ZSNHQq zt`ZUon95%?C}w@N-9~{r++7t$BhDMtdAbg0!fCt?LGg?E!>vUWCiSKriPMi73j~zV zztPk~y|w4fy`mTE@2#E z287>1Wbkz;qI>>I(nE(&ca4M`%ph5J1(}%eAUqLjd-o>?XcKb}&u7s>Z3tq4D-%sT z#x;C!_ys~?6k#EUtANx64l?slruT17W(|&;qN~sqsrznALtWx>@p3W&|0Owi2=<$twqRB=BD0w1b))u1+Jofs3SuK~h~q51V}9vjac%zN+o zwdt$#TQqi;g`ZJ!S@}-!US5gSF5Jw??DNo?{hI`SiMRRhm%}9{aCxMEQ)VgVYaTE# zxPN8oQn`A_j@`k3?55s5Ohg?1N-^uaihpuP?QPNxlWVX91f|~4S}!#mQ!EYnExlR! zG1ODuBl*O3b<>#jCe_K5?d^Cdi}ll}oB%CUu@g8MpTd!m5rAVdUxU817FQ)R*;<$d zR!s~dCk}F8^abhHIJqMQ5ZJHp-9c*+tlbxTbzRYod?3s}|E4I}>k)uI}#Rz3!}R z>hSF{LB-5HRK?jE_w&ht_wj8m$!z_4hl7W_v4Mv%y%l<6ar33B@((i>5`onwwZ(X! 
zL*c+!vjpW!)DoL6a1fjvhwUXIpmFrT@J9Jhv3&ESA&1mG_0du8PTM>HBTu$rntyQr zEs}k!dW%@#l!C|M)~CEXmfTbd`oQ2p>ra=`?nYXoPqRLyoMr%141ZX@2f=HzrXPMFj<_*wKl4 zAd+jWk_~%97A9i(=gCz`zEV$d(^*%xRxSmq=;`>SNFL0Ht?x|Rkwm@ImS=yvHNYH- z;o9i+g=D&qkhxM{HRwDt_1nJ7T=#vV@3G&3UX7S136XUxGbSyT{5G@W*f5dtA7cD` z)hA7Aj0DQ@(<@p`1NY?R%|B=7?dAIf=*C>U{lb=eNcLH1uF}rx9A`{tU)t)-F<^6h z=Zp+8HGySpvHtXqb_@r~bNx+%6b>y_yC$;AW6~6jko;jk`C&!EJKL>5`~6*XpQ^!o za~b0gZeKj(YNW{S9tCOOVo)-X38^!U3k&y|-8gPq%Hvy%^I|xr7hF+NfQu=iab7Y& z|29x=nfHB^yLvs8DfX5qJtoYxSbPG7CX>oZeKj?`&Jvx;=$*PClSA;TI(A0eYTYXm z=^gKh|84^J%+D62^dn{`<%Imx~U(}SqnX+!b9<6zg?Dzqgo`T(fJ{89`F56~9VrBnhtx4s@JppoWBKr~fUzC*M2RCFd8(IL-E zSg%7Sjzpri#F5hJO38(ZO;xi!WX#CpAJEMjH9eioZ6}CE1Da!F!R2u6}(lK{f(uNG3!!i{8&popwb5cdv6ZU20g)_0x&6M>G;vA=Z~+eymV$C z4AG$h)n_ShZ3X|*II8`uEpB>cand<7QIJb&ZTa@kkXadm_aw9QLXU`LmDFUmy3#xj z!y^@A)CuG2?!P*JfNr1*3!h;puARZj?lzSl{Pr$f%mX)GDTeX)px4^wY+fY8*z^~I zOSL`~e+YE0Qz_j+H(vF4oe+?cPKB8Spu zZ!MQThs|2Ucx;+gbxl6{T0Tpn#89~{@21?oMJDnjr_6i%!A#Q6tIiX}QKGkVJwlu3 zkZp-at5^^U&{sjJH7a@qXR3PD(!*E6k*=2F$+_!q;w*y{k*&-k7o!2GenE)0zOD31 zR+BQ6CRW{RP>FaGa`zWA+H}Z=jaa?N6$ZLncY)Ll$lI=z?pz8hO(g2Y=#_OIbehH+pvPRWT z(NLgEGfLW;ji)_(U4dl`P@q;93!mcX%p2bO3sf_s`%mdALNM&i#i2a|9Qd3`U;yPEZ z(*a(;kaGyxFI8p+_1~Va-Q9cfjk7*9`BJ1xUVeC5E2$;Sl2Z~I^u?B#Hnf6grQvl9 zUtWSMe(T<$i^p{;V6ZJ032*(;u|x-ri_bkw6#XEjNsOus9ey=4c24IRTlBikyL-yb z`q!vCDUgo?gpV9E=@cWWPl#S4C!*Tyk<~bh{Zf7h6{5FiRPCa)$kwPX-j@8F7Q2PN z&e1b*vS6M4@>MUIlc{H4dkTrjP5pux2o~EiD}>nM0g-jf1YGY zcBd)DmJZu>_;N0c@G6k=UTZ;2dSSx)2Hd4?4O!%w&lmzqQbTh`cIJ`#)2sACN*H7* zs_IX4&Zz#%kLCiVN)j?l(EuYa;Vay9w+^?K!bRNvG;2C3GFaWW&2=8Qsc{?KF4yLg zZ-WX<=ccgA*0>u;Jz_`bK%Uj%Fp?N1)g zu+8mI^FS_u=xuByCK%#ITK+kaT-37s1AH2hB}_)8`^bhD8j<6nJV%G8cMz*Ef>v-n zU#%Zkrr~atAx1(FDb=Xo((pvYs9H3RJE&iif`L!YJNK;-E!ESVz_S$T1wV-T^klpA zahsdlD;+ECo>@gzF^GxN1YO|}Dc0q0NV^>-T)*GJ>TKv^c>Os`z|}Wr-eDW)CD%F9 zaD0gL9l7(4`dQ++qp%6UlG#iP?#DCjeh~AWCAdN-5-t}&4}q#9PCXlP=#`((3~RZK zgpXfH0_*J&nkIU)0PzpAeU!1imUVZAFz#*h;t4Z-XHgwi#q2JJ^Nu2?4_=?w*3T{6 zL6rFmF1{kYc=}V%_`d&c)t}Fuvs53w$nR>0y?pRC&PUzAhkv+)izXpESSfk_x1dst zx*6+2Yq^l7k(Azbs#Oiv&wRnDcP?qFu%ebezJ9F2Jf&mVN$sRF%8Z%g)eoF$i^&0$ zZEh%oN0D(T+xnf4x|#R70*$CQK0qz}ZK#`cdFvdp!Zdckplp4kDOsB5BO;6belzac zzC_R?-iqc7lf!8jD;n)iejlkQ$0oY2?($Jndy}9_GSF$Z+@ry0 zW*>%S>gYlVr-6Ab+Z$7E+luYt`Mlb~Xd{D9`B(nj&(-Dku<)|KPBnMa>O|%Rdu~GM z%r~6@7LqkitIX{IJP)=`tIK-gGj_s(m;sii$2lstCyYcl7Py8Vr8 z=@UGTJMVG&^VFjt4Yv4QVFx^^t8D7&p1jE&qYNadA%ew6N<=mL+vkiQ?DmZh5+iL& z83x`=C~v;l9?cfk^M9(k>hIuwecgKMs&W#E*A<5Yj}fDv;aBQo0;gu#GPwhla>P&6 zk}u(^tvq1GAg7xw(u>9};V$+w>{fjvaIt*Ok(aQ(qRU?=(xUy~CTz^52VFVmuCQWz ze?VX@4ac84mQ(7ahS&f5hwTZQ&2u)br@U^CT@9&XDK*DM&=orl))r>ND_vfY=2Nqunv)sHEmxX}{V{Dg=k0k<`3>~V!% z4>c>&<|9G z<&%}+yhq{WT%FT$O_g{3o`-qhmjmZoTHKBzmjSa4rqDRl=ke<#G5pZ;(YpL8mVX3!O2MXuM~PyP590mi^f!#;pbXmjiL4j{RD9<#ox*{I{vJ zCAIpH$E^cV+FyQzPCFJ@alCzQ85WT_o^%^y$D1csb-M{NW4%u46G;7^%V+Lfd-k}j z6V2Sg(AwEd*Q4*dIp^38y!y0s6G#&Oint!l-_SXFA$aC_X12Hcl2oB5`pZ~yX%T3< z?w>T4NnJ&;^`zE2CytKEG5qc)Pror1C|F1T{twq{#u{JFv=4-|`=Z6_2mSd~rPO)8 zZ!ea0)A_8H6sy08?v3|AdD2)H{2rD~=8U){tKLbIc$*D5XncxJX!wv>%O1V(%o1&| z^8C#+RBlv4O1**8F`5 zFMQ3Q6%8lcsn3!Au za3D3pyi^$~x010M;&n?F1vU<;u(MpID3pypcnH^ivUF(c&H(9yQ@ZtgG2AnUc2ENx zwb)K$!J{D5m@UG^S7c`JGhEY8z+DSk>}P_)go+9Kmdk2>7jo-eEnAO%kJxs+W5Kbj zP4b$HJe_2Wy$bu&QYZB zw4ucSt7sZZDvF7%#U@`ypOIM6J^8*SONtda>3n9QmkD5`N)%YJSfarn!j~XP@ zheMf4Z(n4!F8{NR?(+duYEk5~#HsO_wCPWU5lAmgLRpmekOBUu>Z2G!=Q`UDng#{p zWj7*{#yTcsaEBX@;F>oQbL}a&P?sA8?XAc7OI}(b ztlv1k0_)tij8UMgb*lME{0&H)cRVqp)vGuYnP^AHu?h^J^-k_~LoKQ05-8vs@C}z+ zGnrovqUIX@vmeDVz8XZ?AufVvmWsvK*k=`|KvLbZJqCEMC>v1aUB>r;S zq(WIIRlI%MwY0gZvgCLta; 
zsf9$5vZ*y;6KDcdI$g91PXX05X7F(sOVA0a)Adx2uL6XR!~|>UKpTTh76kq@Uht+)wpupeVcn1ITxN< zDrkQ~=uL?F{l=Bv^H!45vpNo96t~KB&uN$2brsI8U%z+9GGe-_KR}fk`&Flh4AEDU`s>=t% zG`=cm;QW(k`EFdm+6UB`qlt6Yhpy{Hwjj*-j;l>dA|{1wcvOZG(LKScDY83)0}eKi z7VG_MYNw6fEw=$6So(@x&G-$MKS3tMiNxox$h9Tjt5$>}PYHo%65ZTJBJ=06cgbFFiqXHEQFecjk6*e@pDJwcBiqg7P{3z9?gM@BxXysJ`s9OFd{dF zNf4|#Y97B%ylb@4uMm%lURKd|jHV(~CPrI{mIW2Cxv*agkCgu^{F~XYT#qP@ElFaa zx?s7;-N_z$ zDM2vP(^`tgLqrV&2?!I&zeXe`U1#7!5%hcb5C#=IW6NafNpF2#){}PfaM@)3J-;SM z^?@l(`yLc$SQP^1mSe{+wu`w?rkSXyRH&1(USp=_n&Ixn@{f6BwH% z#Hh~59@z;s%x_N2Y#FzFx5H^ws#G}ANaRg4L5mW@bUQM=Xy-Z+jj+u1v2!*(!9N;H zs;JUUAoE!k91XTI5MUht$P@qj`3r)t&jyCrS1ZeZG&~`#2%FaYynNr31S^59#c%BmhANxo&rhN4F9hGIICaSnV?~216?};bzC69B@~e4XYgNJ}iP|#uqt+*~iZ30#A_5e;z@pYL1Gn zc4e%Zmx9;S;f5LjM9ZerOaI9Vgi)Z|9!gJbNm)xvDXO8slxjn%j9Sj7BT|#%#w!<^UDa)}6MT z99QwN8G^;pbBq#`RmY;#hWx)YVeza3+&vviX&SzXKhSl@0m-8YQp@$9PwB6f$~hZ$ zB3ZL?Ml8-o1G=7C7r9GswlPf-ddayW0IE1_^+cPO%FglCo2t2gZhX!0yFV}L%UP@= zi`&YN@5bJskxV4Mzg~&*$11n4L4oWh^7U3ek|IeKeY&za8PNS4FQPgTzS;rbaCc?A z6od;i(ergE_EQ@P%g6yh%q9}6P`+ID#E*)HhGdA_N6foJdt5T0XdpZtYeB_N?81Vb zQ9a_D=>0G!uz6t~&dP0o(lRRM%wB9Nk4-GX2#DV`5BG1UJ8|i9IA2Vitn(rSV-_#- z_Ji%E=K(HWZa}a~oAITiNk5u!PLw*`AUu)W@<{L5q->96w;!tbfTol1$<|dNzgT_o z_GJ=7lSJde5PU$7~xWUVPQ2vQLZ`B`#V#N@ja-<*@`AZFFHrK7)1MLm>%F zzXm&hibAp6Bs0f5TBh#&2~sDwqdiyV+Y4V_bJY6omx&4iiEdQ_xdWnasy@#`m#fbjmWI)I^DMxwD^2OIo%%4wg>e{9(E3r#UL;0o^(z*I-g8; z?p(@wnOg2nTgRxO$q=v@qayJsQSvQcLQ9HvY#Za0pd~jDd(Dw9AIc5O~*Q{mj&Jd$4+C40k>P*Y`YK#|!DJ%qv=+B%y zJiJyFW+s=ExTqzlz~p@2h+Fy8-#)~x(*KFbMyF_c$-t+UsM=A*SUN{};^;O9aZ{jB z?@AysIg;Mlre432@LwzTwdQDGIx&dPH$kLfS*H!vl+Qrq?o`4`wS~Ju+_@8oP<=z& ziil;K^j_jg2PHx0s{c$WW82K|VYE`=)=n#8z5~??((e|#2R&jSXS1nuD%|?S+ylB- z1Y;UVR`i@-VKy!BOc^l?B*k~%QS6Onyrv3>${DKzCE&f+Y)EXLwkI0&rkv8{1IaVd z(=_f8hFX6@FAe9js+8X?^QCta_saL%{QVskRM7R0Nlg0ZWm4kAtK4V<((V)TUpl~9 zobbKF(BjYFJJVAm>CRYo1Cg~?O0v!p;F_zS&5llzU-L!%~4h&E7@(_1P=Ll06e2Kw+3WMygrp*)Svzl#oLeb~+V~V?&NN>Rnv}NJg<+p+N)cCSd6B(`Z397wnZ_>h55F7-UcO{tUQ51V^1!mw!Q?#jn+laJ zJ2E*uMKg9pzKN>^VbXQ995%n5X>-Znnnd5|E3+25=yc`QV3AJSK(fWAujL&5GuzI& z^Wc@@z-V4N<<>e+?EX{Ic{7?|R=5*Ui7on>&GL_0($I_K zA3SC;h_cjFJd0Uz|t+~0cEK`(H zvN+SMLeQbJ4u-<9v=u?w+2nOm*1L=&m!k=I3n(m^SkcXu4y)XLe!i7_SOu3-z-*_v zgMdtjEDfEMqZh?AIOML3{(k&>F-V?h?IKcfSm2cSr*Q6Tsx!JBNnEy0OZozlrQaXT z>D$k(XI1Z=HkpFTFRYXXrY~26 zKQk+O-jXUPdti*eLLy>P%b4Ar9`%HI9&?t!J2y~0yf{FJTJNc*zo${;YYU0EDg)+$ z8yi{AM09?!GKh@iBd6^Xr@wq{7qT_3>rpbf&>H=mF#hDsQmJ)BW?<6!MIO|eD9XeR zq;-nAtqp=IKjoSgWA$YfgW4W%l_B{iC{VppXVLXckgj?eJ)`5Da6&t7P4&%SM))U4 z)Z0BtpGCZZntWOe3v%hbbrq-+4+k?AcIST@%I$VvMNcn3_s?;;LI`AR2Ju`%H+bK*fw!T*93l&tCz% z-b(Imdh^d*5%2Qs2;;ng7KQ%2BcMSW&|AXZoqe*hT0J)Qc;+a|G3ez}$~yn4=0Ity)5k)=QLXg`>OmgG za{+OVGYC=E!l6w_<{E));ozu=0?nGnQ?p?Y&DTEWCysE4J0n%)Vn9 z@^qpxx|Q!3IWp4?s|xQr0^_nSD^M>SIkETiGQxlR<_yG9CTs6*71~!jjnoSK?3~>) zkESEvJs)ywt^ldPfW*%(37NL)dqF^ONpYx^G``G+Jnchm`6J$LiOF*WXg^kgl^mI~ zn-;f=NX!ca^Nkkhmr^KH3r54xs`1sS^D^kS9Wg>WPay@Jl4_NRT6tT0*K71V!XC?uY>uKAYqu*ET2N$f3 zuM(=d+&9%TtF^4Xm7kAPXkvs0nhGd|1=$UZ{K#Tu!n1wb5b|czRW-78kzq90Z?z%J z%qdiIiVAg$j@R^0FX>O}0KYeHRL9@Y@n2a8Zn|`OZcZt)F`TfZE;=x=d8+kE;3;h- z22D@<`16kmw1_K-c}9orgXx1=PUXpciL~K2VL;7nko!CTnO8?jKM&Gv-b9_dPths? 
z>$Kyw5DKH#Y4*P9cTHECpd9d<3#)X_5U;V~B^&*$sa&-E@!gL|t4(97yO}v$d`mN( zHUEt+#rI_2%(XHeiXSI_!cLBR7l?hXFiekJ2oxmUoR$B&5zBPfAB_&`fKwLQ!5tPHZ{esqnZp19HMvwxp?)i&8&_ed48)N=2ikf zcI}2%pi0qw3j;z;!MWyFBNmxdngVl+uk@`pO!V4qv>JwEn+M#@o&`7>PkKqAz;M=}c3ZERK4 z66|sQ%e|F3j5etu8@(T=(Foy3_D@rI+Gj z(G3qT6Q94z>^7(?PbcoZx??RJnb>}^eUqhe^Qku;`{kZn2q!>pPbk9(rW$TjYx`beae)JawOtXp?;;ERkHC zvxXcuiuHFfgKEmCXE(RrC$&2PIOPNE0q?V#$O_`@dxS5xR6BbN8sZ;i0xj~4DXwi~ zDM;(5XA4?bUshP#QSpboTy3Kt>(_OZZxSGoHzI2IC2V}8NvV$~&R)k<<8Z<1x!|=g z{OrcbZ2~wCu%;H7SVlSSt!XvnT;L*5W7kyhh@Zy(?EiR7v_5YZ5wm;nYe&%Ga$^`4 z6n_vYpUfE;IN`r#@oD6z9>!WgUn6h}{%HDG_+6y1dVY(p;K9AB3EjVvh$cnA!i0jovFGOB z)=x>}Z6zwuhy*X0aW@rxo5XFFiHPJX{}CZu=I}gjrD!mcl!B34i?QW0@7k4A`xI&{aYxb1L{pa=U5)-+tZ9WG6ewNtF z$|-bT_(K^uoUgF(n&N+nBA4B;JWHG?wJO)aC~7zQ`yQMjv477H?Qit{d(EioUs#FQ zLT&{ep#~`_E=>6??xvf#s2|!L~y^uu*($zH^yqGOCSO8Ijt!ytcPq>lj#Ycjg2$-B{P4@aQ zoRdt_ND2}XJn#0uPHE;>0V4LfUr>Zq6|hKy0=Fa#tduOlQi3%(JriqG5~L4p*b>A3 zJvp#N-}1|%D3-zwVqmN-#}t;{{wpjmHWL|IY`fGO&{8Tf6Gz z6u4&0upsPjz1@ai8c}PC_rgFm39Bt#tI_^zx3vR5Q2sP|6kc(U3|Gk;!-Dbs{jJwh zz9|oy^PeVR?>fAu*|zB7j2@ju+ zIv3r;r%{22<8OXTfrmjSjo2J38vdlf@C6=DK;NN-HihG04Pmgo^S|ESQv!5%TL(OY zFa+#aF++>c17hq*F;2CMqd*!3qM4-ixG&I^@=aKM(oV0o5p-4-)0A>gYD=nS{WU{0 zK}{k|Bvn^?KKP3njPcyNTkqT>0ykl{h-Q^4LPxHhfzSHC|6-U;AcCEB&zjz1zvdK1 zWX|lc=v!nUGxp(x`~wh_fkzBH{Bhf`hJfWcOsxdsw}0IyabUht{2%kJgng=gED@YC zl;fE~iLL*7CBJ_PzaUXDh+Uem2}Jn+?+w+&4(LBg?g`A2&VXH0E|6xpFn}Ep4S|kH zL|_(Hwo!h0_#w=xbUAoL)cD&wg1s})=>a3wMOyIni`c=4(LWO&*n{o#??vA}L01`L z2P3IMFqvSF0v?_m)htqkht=WXANZ!9BLlBuhcifhkOVtLm73zKIdgJy;>Eol-bwtw zbu|nH7&vV|sBuRE(e@J*A;!hdD_t_Ja-hPxix8Xa`_jV$jje_t2a%TjE4%p_fo}t3 zSMXKNF@GhSDI$?J=eE^OeDlE|0gvI$>Vo=4;e`JYffKtBU|%{|D+A{xV(g8 zl}hmsy9*KhdAC*2-)o*=_g-xQ7&2l{1iVr`eGGQ!I1(lXVp~#o|0LO5hc(}fFK&Qt zHNLK#%>7;N!suxClj>&nd_^P_ek_(KUd`mvr^1U^XA}tV^W1OjsC}2 zZLwOK&~lIs?4E*~?@a5(=!R~nkoJClOuRFHn zz92y?zk(d5a8d3c>_^i0bh3e&r2ty2**oMs%>meFq|Qr#F5t)&O)Y!{Y>LXiCg4R- zUK$yqSWnhR4prLg{rKQc2pW!pmeg-1uIgtr*U=`8{Vs~RkyqG$5yHfooRMTNA!WHtOj_^{beWJje<|@%_S1mZ5O@WSE z8vMGE;Od;K9FKSOv)bjSEY@8VE4+q+Mm(!31(@z(wF%xYv)~Mv0nAnCn2gty1_aXe z^Xt=-eFGRlshtrN)}_z?XkDoS{6Dq^z^%5In({B!JOgSIq#tMlKZ$Pw*D446$gpm1 zSS>KtC$BLl1Ei9WuQpIc6Uu*Hq?ayvidrex7fvdLb!fGSZ zb(+N(BrFq3KoynVwCO7BerH<}d#6nP08zJ_Z{j#h0za#bC;f zwRI?Uolyh>@he;T>RFQG1jvg#ks<&;FG2)xv(1p~K8VfRK#qhJw+J|&n4m+SKBaa& z03QrI!Sq&)8$16K4Gs160MiWfB60`V)o9VFAS+w*5H}N6u9~ z4Go2=d5nn9N;>hLv&WxsW~JDvrl2Q^mo_&Kqv=XHvnG#2n_d9VFZ#}p9#+hsnY2P*sBmJ$1sdK-B{1%hRei3vCNq2;1Vv8rP5 z$EyJ;6L_!Mo```Y@TKr#N#hBB_I=2a=2oFd57x>W2;TAIw!{mSIS5o|FVrrgcVkVZ zBdcvrHxnxE_k9ZIi7W#gbQ;XUK~s*yq5bF|P<$zx`qT%2v(c9_XRz)DU<4$z+_*PQ z$UNs@q6Da1G<#erh{IfefW|rXHo=6$PI#$Vl~XH}fV(~{ee&koSvNT%oJ$b0-LS-iC$Y;X$L8Y@(Tm? 
zig1{%YyY*14NqzMERGtAi>8wp4QIL*``xn+Yu@T(iB|I4lCKAY+6#{run6%vlQw-Y zeeMO{w;4gv$A1t|X|tBRCg#QgoTo7Bj${djzi=rA)m}Z}O(_0p`e;R%j{2dvfBmFq z%WY>f^s$-GF5a(4&!t?Ba+r0QvrX)mo5Ni{#kj_f|Hzi1Wd6o|If65w_DJizHAYc@ zDke;QNP{CKqS0st#g_C&@kv}RGx_!JH{_BdhYU_Py?zVd`i!2m_D@;;bu3}q<1n`` znKHat>9TgIB`jgA`&t}@La8$tfi;%%Z1PFH)IefE!87?Y z{5|D!N#7Kj_mZyFLcL^v>`}Qo$@W3pkH)k5rTxWay+MjVGlr@l+l@ETig*jEqbGtNye_ zRX?{}^^R}jS?lZTHsh(bAsWKSh5Jz&ZQgVl{euNY2Ed}fn}uu3PP}(hygoo z6WjSAb8cZ?O`A^7s`g3+<8(CbEyV|bH_c5BdQ9@fB@XHeSdV|W%T>EvRjilpJy_tB zA+|Hmh}cw$GVhe)(Pj?B!LugVBSkMRJi9SE%vlA{zVdu`hBcU#^}|fe<`g!On==r| zkD4nlFIRt`9dri_I6Gx`KwwG!E9bPwkaBcsl*@!L-~Nr8x6W$`cMD$wnPd%v-(;_x z$ZEFkM@`EZt(&V|aQj(zE<`LLFDUVxc|k$JL$EkZeW|*Hd@#tNbMl28zVu!3hp%X5FCRCPd!YO2ydg+i z-)|>ja;YS$&wV_pxFa$uit8fgCdYugE-ZRvj=Gp^KJHN7{=&4!xOIWumOfs_C|qRV zQXrE&2Y$WRzgO5jyO6w94`n)IiVClOJHDaDfB@2k#hLH8;K^k6B4DUM_VR*Tee%iH zpTfaOyF0IM-R3PSIcNHTz>HpENA9(jJ5PGaa<91qh$s6$daP#0BgYXgi`~-B$HXg> zOBEGWV*(F7?e10kAUG$e&ET~x4g~zc2o=ZpWpc8 z1kT3$l`A#_0S1&(TX)aUUmkIjFDz6#Bd-2B$KeCtM2ZsSu|3X=Gf~d@k>6y=F`gzL zCX|)d+*;I!lNWFFmC=nyc)<&FVN-0kXM@IJj*W@gE82F(t7h zxMB*I-niNRg1xz`l=zp#*0)mH`vYN%&5fDmeQvgIohCTw68Fo~`l*}WoYFcL-`fyR z6LMM;UGNtY&=GR(=FtjSd91F!|I4_VOF~V@o0JK$sTn>BFJU-$~8P%e79qdWN~54YqQXg=IqReAJsRWl)7 zT>c~>RX?!{Tmuz`TfpDn-ec z9%sgv299`MfIUkZC#VTu+#cihploj|*OswSig54gm1Fw8L)Iv%FLLKnM!F2|@zkG1 zaZg<>3^?NzWzJ0`I3a_B{V0Ppt_gGgN6xlbdb5@BM0^zPt`-ug0|{ z+We(Zf z+)##h4A#hxfMxmt=OgDne<8vc&YQU8SN0wH0&~>RZ_XOIvi~LlH*}_jMU^fiJ}UY5mFrKi zT|D&|HyF9=hl`I|S$brM#{QSN1^gGGher5YMLr59{W67rpC$^b(=$t|tJ6EXzhBF)9n&@7MJj* zLG&fB<89%u4qaI%gEDMV+lg|9_Kh|~=d%a5GZ zj$>?)Q6+%D_M?7qK69Wn3$&4`K=V5R1CbWYt0)esQ0Bm<5_nbLi!j`V*tzC7Jm7-Q z%__}jQ?T-q|KHm%ObJ1ItG7j#|BW{euIdFMw87h3Z!Y~?U<5VluHj;CU0A>Ep4NF295?Q+S&=V2k*chTN@(COai%EUW?oIzYxHpEZoj5QbNGmOY}w_B;+Q^~KrQS!G$-;Lc15qp&Nqzbnrg7#Q$_ z`>;Up<2G6kPj$`UDd?QDJqhX9=ix0wnjn$P1O9y(kn9`r-hnfxilt>X85x-}I1}*n zQ3&e>n|EHWSEK&hdB&_n^z^(E-dk}1v+s?sfLZZvC#T}sU%&Q`Z}8%?N%MgXw=*Ml zRWq2Y1hJ|nw?n?&W%yp2-oz+5J)Qf^xl05$2`4t?U`VcVColyDmP6-u8XT+G`x9dg z8Wu`;f8v3Py2%pHM2xlH$TeUzye_I_G$c69r^8`HNz6vTV2K5(w1*6)S&?AezhX^XdGyDpFw{ixe zE4UI`0bp23LQ8A1G95!BiglbvvhQq$+zb3)2Z8hmesS&GHP~?yMJ*cZVl#UKK3^7Y z2b*@?i9*lGD4b?Q7Qw$MaD}+$_uF@~Qm6Zxy@sTJj)LzYpIXcF&Aq>pC;E4)ufT!R zndzs=XVc@KGX(1dAJzmR9+QFo?La~7gwEoUf)~CS4=*Y;j?L! z%)$IycZrb%B~!fm*@q7wvM@5r020Roh|>0GWMLsU6|fwDT7)UV>9)tPtYHuE zq;Py#ue!KBqNoF%So|p&=qD^`L>$1I>|HfBR@R8(v-ZG5Gxpp5wy_#;YJ@e=FR$Xh zb;}4`ez~7Nf8PK0F4t?MeuD&TA*)Q+!*TFl!In{a=SzZf?pg3PW`Kk?5m2HIuw!9@ zLd7fWuF1y1Wn4n42=()4&CODOFUQOJmg~*jpkXQ<$~-5MCW^&OL1+efHl5KwvxtctS!#f@MrO z)+Sqgt?3LK0oQ7d%=Zr7MsC=P1lx>r^YT;xnd?a7SxsSrm0}z8?`TjzyRos+Iei4+ zUk4y%d7$%=asC3%(9T1;#YWW{Zt;zDY0ad@xWT1U-U-zIrnv=iuNK$kmaVv9uwV`W zHH3A+)20W~`8m$w&9Dy>X0nR@+dqgXg205U1ArpuD>loJV(Wh$l-=0e>}nqYup<{N z1}wOS7ta!-MeCp1(c=*$V)qAMn#W|lcAuqA)?+sS|4Cc?;vywxB5K(}un!K2viv;P zR(AAXi3cOOw(#5`G7Tqi>DHGY;jBo<18y>}7-?a;!P%n&(0F&nbb;*ye)@Ud6TLul zbO|mlzA+d4Lh-Wqn9vc}x+MdvwOpA%Spel4TxB>~5(Cu>io+Tor#RyH;^%Waq2 z%@7zZs*L{auPM(UFlkQ{(nu5HnRcwyHerg<>b{t2UI@1dQU-vbdx3tbrD*w#`@dtI zh#*NCe8?4eH>*CePI$jPrSGyYPwg-f&5eYABdAnx!LAiuzY$%Bd-~;jo^+@II~I z%kTFr#kV(Fa$Edsz z8)HKPr%ACQYb-hpWpM?#ZsSphU=d)dWnjouyO^KGttWg-SGnZ5VV!5Gt|!o5dBNAD zYfjz-`_@f75T@4VyG7AD#wM_H1X`%T<~o7Z2;nT46V!Naw%zGcK7_c@BcSXW#+w1q;l!FBJ!{3kmI{2%_K% zKYEV44@YF;b^HyPVT&IkSr{qJ5$(4Glb8Rerz;PHI{m|q--I1ej&W>@l_-rhSY?HB zq-CPiTmzl5c z`+n}{eV^y)&6Klvt+OgeZiDT%`N< z%!pMr2l*Mg@ivO&ah{rTcya$7%fBoBjT^L?4R)73A1XpbJXoDwaY;$&R}JH2cT?Mg zt!YN{f-S@tr6wCm&AT(s-|Notds*`_uA!{I9=oS+yl`!E(5$TDrjunx22O_@b!(l? 
z&BMEscwn{vbaY|rz-E%RHXP_VqLUwhQU4=@46OOT7#^S9ers?{l|fZm50XZh6@8Km^*eGW3BJNW10AB?tP#dO}#yqV2fwqzUu&(OT)eY z;D4x^d2bvQ7ncOy_LND5rJTT*$CC%872p9^XD7Eyb)1Wc*i7SN(SvIk4pPd`JIO}Ng$Fr=Y-4Y8WUibyd;6P&Ew%pEP$5T)> zJMcay-##>8B7L5G991W0b*vm_`@*36lUpbpTza2bh@q`}!CNDX1#Qhq;3N2H` zPJfH($-iiDRG*A}qbgVjwglX_#L94|%}bv~gB#bXB2y3YY;XX~y}!3iMxE2sgIO;l zIG6`_e7W;njvlcEN3f4mD({elypz+%JlcR@JjBxPgRN`Gc*@5P6*5fQ_Clee&tGF< z3V=lQ>uwZe*DJgp&EiIk!Q0}XfStjVYHda1pt0*+`7X?=xk*>4FiBs9or;UwlZ@uu z=+C_NNRQBeQ(swod1V5Q_>-^BnSKG`ylM?_=gk5F?p8qba14fj$%W0u8~+v;XP2UD4&7eW^sku|EYJ!~>_HvbPEWkd?b{6- z9QF0}`QK2P=`>N!<=2Jx-^Wi)olpWfXtR;erp5KdQ|{NG@}>s`1r?)(rBw~ASM0o! z2p+6^-7ZR0s^Aw%VYi9xD%<@U6qH%UYXQ)B4omU-wLK#B>`Vad5Zp_9y}fytE_o9( z;|05}lEApx5lt^09L4`k$M2|3N#_@#pg8s{(^9O68^4ebiC>5F%tAt{xDH*G9&}T` zY6ID;5qhVNK$-AO6(1l%bHUxxmBKwm?|*ot;9hi}B{wbY-TDH}Q6t69dVn(*k@8bl zHU<&F*jNJDtF`jSm(sTi&0wYw^L0hy`_o@zj?zH%13W8cps23

CKD#O(N<MN~u-PdeF6}bZnu{fOXbRCh6L~Y%+7%xzTURjEKxC&&# z=fN6liR3JA&lHhrC^e9*2iT?wTrU}bhuChC$Ksp@tNd>5VekqMA;%Ex>EylPn*@}G z(srQC+)Sg&a}}r%!=FZe5dKF?z&(2M)7*y+fV?GpeZf*FxEWX;srTI+V)G@Wv?!@3 z{}BCFzzUeOa&XkfRcv>Xd)5=+gdnQh&v$RiAahs}mDOV)VlIZF(^04BW%%-NdkK_v zzn^YWCB3DrUcK5Hj&_n4*r9MbV=0IA{DPY00EpO&F-G^~Yf2JEtp}rCwRK0W_;c24 zs0$~81FQ)!X2jFD3{g&c7$BDm(#09arIi-lI4q1x#+(|kl(}Smsk`~|1_S1m;;vLa z__mv}LTD#VyHGDKyrzhJ`-5pB{@2&nll^`!KC#b2Cf0|<`+c*ap%dJ*;kE)Pa5H_e z0VBpB<5STDHiJ|fQS4k@diPgt#m>6%=P^L<7y$tr#Qwi^Y+Kd=;jWLmn(c_PW-#K> z&t~;DI=SVJUJ&K5ZmBGrLA`r9;{Z71Lo7!{K6~PdgQUuS>&RQ+l1>~kbMhQk9%I|m zGM&~)s+c$f&aD;nSB+DAgd6nj9+BXo4=C>i4-`C?O+~ARu2W57w=mWG(p04K9(&^s z*=+6_AK_eO39{3`+qbDMCZ;XxX*AmPebhFS{Mr=x*^X`WCUn$$QFZ9!-pj4^mm)r! zKzK2^qq9>(UEMPMm-j|NgPCXQDAY-|t!p4Ih$}(5j*xiNM{M2NRM#Vr`%UQgDkNpe zm77B<2uj{~E?4XEu-AE%PXcxzFc_8VK2+!s)^3GVD1Ut zf$RRKNkewS5avk0?OEN#A>XzHsl^vgr3}gqAgBsJ@0rls>sBJ44}#w}QcY!LKfJiZ zPpWl3A|UE-^qB)7)J(=X1Sa~})tK`bIw|3t#jZUZj^8t_-NM}>&(m&&ehTR>*?+!# z*AeW^(Wz0dq2BV6-hdZ=s9Ndxf|7#r1FEs^H;a5*O0N+NPC6e=Al-*Sc)fNX(foH6 zxH=7Y<*K8<*0 GW API resources. To learn more about it refer to: [Ingress Migration](https://gateway-api.sigs.k8s.io/guides/migrating-from-ingress/#migrating-from-ingress) + + +### Resource Models in Gateway API +There are 3 main resource models in gateway apis: +1. GatewayClass - Mostly managed by a controller. +2. Gateway - An instance of traffic handling infra like a LB. +3. Routes - Defines HTTP-specific rules for mapping traffic from a Gateway listener to a representation of backend network endpoints. + +**k8s Gateway API is NOT the same as API Gateways** +While both sound the same, API Gateway is a more of a general concept that defines a set of resources that exposes capabilities of a backend service but also provide other functionalities like traffic management, rate limiting, authentication and more. It is geared towards commercial API management and monetisation. + +From the gateway api sig: + +!!! note + + Most Gateway API implementations are API Gateways to some extent, but not all API Gateways are Gateway API implementations. + + +### Controller: NGINX Gateway Fabric +[NGINX Gateway Fabric](https://github.com/nginxinc/nginx-gateway-fabric) is an open-source project that provides an implementation of the Gateway API using nginx as the data plane. + +Chart Install: https://github.com/nginxinc/nginx-gateway-fabric/blob/main/deploy/helm-chart/values.yaml + +Create the Namespace +``` +kubectl create ns nginx-gateway +``` + +First Install the Gateway API Resource from Kubernetes +``` +kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml +``` + +Next, Install the NGINX Gateway Fabric controller +``` +cd /opt/genestack/submodules/nginx-gateway-fabric + +helm upgrade --install nginx-gateway-fabric . --namespace=nginx-gateway -f /opt/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml +``` + +Helm install does not automatically upgrade the crds for this resource. To upgrade the crds you will have to manually install them. Follow the process from : [Upgrade CRDs](https://docs.nginx.com/nginx-gateway-fabric/installation/installing-ngf/helm/#upgrade-nginx-gateway-fabric-crds) + +### Example Implementation with Prometheus UI + +In this example we will look at how Prometheus UI is exposed through the gateway. For other services the gateway kustomization file for the service. + +First, create the shared gateway and then the httproute resource for prometheus. 
+``` +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: flex-gateway +spec: + gatewayClassName: nginx + listeners: + - name: http + port: 80 + protocol: HTTP + hostname: "*.sjc.ohthree.com" +``` + +then + +``` +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: prometheus-gateway-route +spec: + parentRefs: + - name: flex-gateway + sectionName: http + hostnames: + - "prometheus.sjc.ohthree.com" + rules: + - backendRefs: + - name: kube-prometheus-stack-prometheus + port: 9090 +``` +At this point, flex-gateway has a listener pointed to the port 80 matching *.sjc.ohthree.com hostname. The HTTPRoute resource configures routes for this gateway. Here, we match all path and simply pass any request from the matching hostname to kube-prometheus-stack-prometheus backend service. + +### Exposing Flex Services + +We have a requirement to expose a service + + 1. Internally for private consumption (Management and Administrative Services) + 2. Externally to customers (mostly Openstack services) + +![Flex Service Expose External with F5 Loadbalancer](assets/images/flexingress.png) + +For each externally exposed service, example: keystone endpoint, we have a GatewayAPI resource setup to use listeners on services with matching rules based on hostname, for example keystone.sjc.api.rackspacecloud.com. When a request comes in to the f5 vip for this the vip is setup to pass the traffic to the Metallb external vip address. Metallb then forwards the traffic to the appropriate service endpoint for the gateway controller which matches the hostname and passes the traffic onto the right service. The same applies to internal services. Anything that matches ohthree.com hostname can be considered internal and handled accordingly. + +``` +External Traffic -> F5 VIP Address -> MetalLB VIP Address -> Gateway Service + +``` + +This setup can be expended to have multiple MetalLB VIPs with multiple Gateway Services listening on different IP addresses as required by your setup. + +!!! tip + + The metalLB speaker wont advertise the service if : + + 1. There is no active endpoint backing the service + + 2. There are no matching L2 or BGP speaker nodes + + 3. If the service has external Traffic Policy set to local you need to have the running endpoint on the speaker node. + + +### Cross Namespace Routing + +Gateway API has support for multi-ns and cross namespace routing. Routes can be deployed into different Namespaces and Routes can attach to Gateways across Namespace boundaries. This allows user access control to be applied differently across Namespaces for Routes and Gateways, effectively segmenting access and control to different parts of the cluster-wide routing configuration. + +See: https://gateway-api.sigs.k8s.io/guides/multiple-ns/ for more information on cross namespace routing. diff --git a/helm-configs/nginx-gateway-fabric/helm-overrides.yaml b/helm-configs/nginx-gateway-fabric/helm-overrides.yaml new file mode 100644 index 00000000..87b62ae7 --- /dev/null +++ b/helm-configs/nginx-gateway-fabric/helm-overrides.yaml @@ -0,0 +1,117 @@ +nginxGateway: + ## The kind of the NGINX Gateway Fabric installation - currently, only deployment is supported. + kind: deployment + ## gatewayClassName is the name of the GatewayClass that will be created as part of this release. Every NGINX Gateway + ## Fabric must have a unique corresponding GatewayClass resource. NGINX Gateway Fabric only processes resources that + ## belong to its class - i.e. 
have the "gatewayClassName" field resource equal to the class. + gatewayClassName: nginx + ## The name of the Gateway controller. The controller name must be of the form: DOMAIN/PATH. The controller's domain + ## is gateway.nginx.org. + gatewayControllerName: gateway.nginx.org/nginx-gateway-controller + ## The dynamic configuration for the control plane that is contained in the NginxGateway resource. + config: + logging: + ## Log level. Supported values "info", "debug", "error". + level: info + ## The number of replicas of the NGINX Gateway Fabric Deployment. + replicaCount: 1 + ## The configuration for leader election. + leaderElection: + ## Enable leader election. Leader election is used to avoid multiple replicas of the NGINX Gateway Fabric + ## reporting the status of the Gateway API resources. If not enabled, all replicas of NGINX Gateway Fabric + ## will update the statuses of the Gateway API resources. + enable: true + ## The name of the leader election lock. A Lease object with this name will be created in the same Namespace as + ## the controller. Autogenerated if not set or set to "". + lockName: "" + + ## Defines the settings for the control plane readiness probe. This probe returns Ready when the controller + ## has started and configured NGINX to serve traffic. + readinessProbe: + ## Enable the /readyz endpoint on the control plane. + enable: true + ## Port in which the readiness endpoint is exposed. + port: 8081 + ## The number of seconds after the Pod has started before the readiness probes are initiated. + initialDelaySeconds: 3 + + image: + ## The NGINX Gateway Fabric image to use + repository: ghcr.io/nginxinc/nginx-gateway-fabric + tag: 1.1.0 + pullPolicy: IfNotPresent + + securityContext: + ## Some environments may need this set to true in order for the control plane to successfully reload NGINX. + allowPrivilegeEscalation: false + + ## The lifecycle of the nginx-gateway container. + lifecycle: {} + + ## extraVolumeMounts are the additional volume mounts for the nginx-gateway container. + extraVolumeMounts: [] + +nginx: + ## The NGINX image to use + image: + repository: ghcr.io/nginxinc/nginx-gateway-fabric/nginx + tag: 1.1.0 + pullPolicy: IfNotPresent + + ## The lifecycle of the nginx container. + lifecycle: {} + + ## extraVolumeMounts are the additional volume mounts for the nginx container. + extraVolumeMounts: [] + +## The termination grace period of the NGINX Gateway Fabric pod. +terminationGracePeriodSeconds: 30 + +## Tolerations for the NGINX Gateway Fabric pod. +tolerations: [] + +## The affinity of the NGINX Gateway Fabric pod. +affinity: {} + +serviceAccount: + annotations: {} + ## The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC. + ## Autogenerated if not set or set to "". + # name: nginx-gateway + +service: + ## Creates a service to expose the NGINX Gateway Fabric pods. + create: true + ## The type of service to create for the NGINX Gateway Fabric. + type: LoadBalancer + ## The externalTrafficPolicy of the service. The value Local preserves the client source IP. + externalTrafficPolicy: Local + ## The annotations of the NGINX Gateway Fabric service. + annotations: + "metallb.universe.tf/address-pool": "openstack-external" + "metallb.universe.tf/allow-shared-ip": "openstack-external-svc" + + ## A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from + ## your Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports. 
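+  ## NOTE: the two entries below assume the Gateway only defines HTTP (80) and HTTPS (443)
+  ## listeners; add or adjust entries here whenever the Gateway's listener ports change.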
+ ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + +metrics: + ## Enable exposing metrics in the Prometheus format. + enable: true + ## Set the port where the Prometheus metrics are exposed. Format: [1024 - 65535] + port: 9113 + ## Enable serving metrics via https. By default metrics are served via http. + ## Please note that this endpoint will be secured with a self-signed certificate. + secure: false + +## extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with +## nginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers. +extraVolumes: [] diff --git a/mkdocs.yml b/mkdocs.yml index c0cda4dc..76b14ad7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -168,6 +168,7 @@ nav: - infrastructure-ovn.md - OVN Setup: infrastructure-ovn-setup.md - MetalLB: infrastructure-metallb.md + - Gateway API: infrastructure-gateway-api.md - Loki: infrastructure-loki.md - OpenStack: - openstack-overview.md diff --git a/submodules/nginx-gateway-fabric b/submodules/nginx-gateway-fabric new file mode 160000 index 00000000..4e3d9c4b --- /dev/null +++ b/submodules/nginx-gateway-fabric @@ -0,0 +1 @@ +Subproject commit 4e3d9c4bcc7f65fc2671beffa92ca081644325f6 From bcafd11baabfb83c9a0e0e3d4f19ca0bbaddd154 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:38:31 -0500 Subject: [PATCH 15/20] build(deps-dev): bump black from 24.1.1 to 24.3.0 (#167) Bumps [black](https://github.com/psf/black) from 24.1.1 to 24.3.0. - [Release notes](https://github.com/psf/black/releases) - [Changelog](https://github.com/psf/black/blob/main/CHANGES.md) - [Commits](https://github.com/psf/black/compare/24.1.1...24.3.0) --- updated-dependencies: - dependency-name: black dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index e024e590..53f502b7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,7 +1,7 @@ ansible-compat==4.1.11 ansible-lint==24.2.0 attrs==23.2.0 -black==24.1.1 +black==24.3.0 bracex==2.4 click==8.1.7 filelock==3.13.1 From d0058f57cb4974a1a0829858d7493e3bade377c8 Mon Sep 17 00:00:00 2001 From: Chris Blumentritt Date: Wed, 20 Mar 2024 13:40:51 -0500 Subject: [PATCH 16/20] =?UTF-8?q?Initial=20commit=20of=20files=20required?= =?UTF-8?q?=20to=20install=20grafana=20using=20helm=20and=20ku=E2=80=A6=20?= =?UTF-8?q?(#164)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit of files required to install grafana using helm and kustomize Signed-off-by: Chris Blumentritt * Adding namespace manifest Signed-off-by: Chris Blumentritt * Added example files and updated grafana docs Signed-off-by: Jorge Perez * Added extra line at end of files Signed-off-by: Jorge Perez * Missed updates to the grafana.md file. 
Signed-off-by: Jorge Perez --------- Signed-off-by: Chris Blumentritt Signed-off-by: Jorge Perez Co-authored-by: Jorge Perez --- docs/grafana.md | 87 +++++++++++-------- .../grafana/base/azure-client-secret.yaml | 9 ++ kustomize/grafana/base/datasources.yaml | 14 +++ kustomize/grafana/base/example-cert.pem | 23 +++++ kustomize/grafana/base/example-key.pem | 27 ++++++ kustomize/grafana/base/grafana-database.yaml | 51 +++++++++++ kustomize/grafana/base/grafana-values.yaml | 58 +++++++++++++ kustomize/grafana/base/kustomization.yaml | 13 +++ kustomize/grafana/base/ns-grafana.yaml | 7 ++ 9 files changed, 252 insertions(+), 37 deletions(-) create mode 100644 kustomize/grafana/base/azure-client-secret.yaml create mode 100644 kustomize/grafana/base/datasources.yaml create mode 100644 kustomize/grafana/base/example-cert.pem create mode 100644 kustomize/grafana/base/example-key.pem create mode 100644 kustomize/grafana/base/grafana-database.yaml create mode 100644 kustomize/grafana/base/grafana-values.yaml create mode 100644 kustomize/grafana/base/kustomization.yaml create mode 100644 kustomize/grafana/base/ns-grafana.yaml diff --git a/docs/grafana.md b/docs/grafana.md index 55db8c6d..2e140acf 100644 --- a/docs/grafana.md +++ b/docs/grafana.md @@ -15,6 +15,13 @@ In order to avoid putting sensative information on the cli, it is recommended to create and use a secret file instead. +You can base64 encode your `client_id` and `client_secret` by using the echo and base64 command: + +``` shell +echo -n "YOUR CLIENT ID OR SECRET" | base64 +``` + +This example file is located at `/opt/genestack/kustomize/grafana/base` example secret file: ``` yaml @@ -31,40 +38,17 @@ type: opaque --- -## Create a datasources yaml - -If you have specific datasources that should be populated when grafana deploys, create a seperate datasource.yaml. The example below shows one way to configure prometheus and loki datasources. - -example datasources yaml file: - -``` yaml -datasources: - datasources.yaml: - apiversion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - url: http://kube-prometheus-stack-prometheus.prometheus.svc.cluster.local:9090 - isdefault: true - - name: loki - type: loki - access: proxy - url: http://loki-gateway.{{ .release.namespace }}.svc.cluster.local:80 - editable: false -``` - ---- - ## Create your ssl files If you are configuring grafana to use tls/ssl, you should create a file for your certificate and a file for your key. After the deployment, these files can be deleted if desired since the cert and key will now be in a Kubernetes secret. Your cert and key files should look something like the following (cert and key example taken from [VMware Docs](https://docs.vmware.com/en/VMware-NSX-Data-Center-for-vSphere/6.4/com.vmware.nsx.admin.doc/GUID-BBC4804F-AC54-4DD2-BF6B-ECD2F60083F6.html "VMware Docs")). +These example files are located in `/opt/genestack/kustomize/grafana/base` + ??? 
example - === "Cert file" + === "Cert file (example-cert.pem)" ``` -----BEGIN CERTIFICATE----- MIID0DCCARIGAWIBAGIBATANBGKQHKIG9W0BAQUFADB/MQSWCQYDVQQGEWJGUJET @@ -91,7 +75,7 @@ Your cert and key files should look something like the following (cert and key e -----END CERTIFICATE----- ``` - === "Key file" + === "Key file (example-key.pem)" ``` -----BEGIN RSA PRIVATE KEY----- MIIEOWIBAAKCAQEAVPNAPKLIKDVX98KW68LZ8PGARRCYERSNGQPJPIFMVJJE8LUC @@ -124,17 +108,46 @@ Your cert and key files should look something like the following (cert and key e --- -## Add repo and install +## Update datasources.yaml -``` shell -helm repo add grafana https://grafana.github.io/helm-charts -helm repo update -kubectl create ns grafana -kubectl -n grafana create secret tls grafana-tls-public --cert=your_cert_file --key=your_key_file +The datasource.yaml file is located at `/opt/genestack/kustomize/grafana/base` -kubectl -n grafana create secret generic azure-client --type opaque --from-literal=client_id="your_client_id" --from-literal=client_secret="your_client_secret" -or -kubectl -n grafana apply -f azure-secrets.yaml +If you have specific datasources that should be populated when grafana deploys, update the datasource.yaml to use your values. The example below shows one way to configure prometheus and loki datasources. + +example datasources.yaml file: + +``` yaml +datasources: + datasources.yaml: + apiversion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + url: http://kube-prometheus-stack-prometheus.prometheus.svc.cluster.local:9090 + isdefault: true + - name: loki + type: loki + access: proxy + url: http://loki-gateway.{{ $.Release.Namespace }}.svc.cluster.local:80 + editable: false +``` + +--- + +## Update grafana-values.yaml + +The grafana-values.yaml file is located at `/opt/genestack/kustomize/grafana/base` + +You must edit this file to include your specific url and azure tenant id + +--- + +## Create the tls secret and install + +``` shell +kubectl -n grafana create secret tls grafana-tls-public --cert=/opt/genestack/kustomize/grafana/base/cert.pem --key=/opt/genestack/kustomize/grafana/base/key.pem -helm upgrade --install grafana grafana/grafana --namespace grafana --values overrides.yaml -f datasources.yaml --set tenant_id=your_tenant_id --set custom_host=your_url_for_ingress +kubectl kustomize --enable-helm /opt/genestack/kustomize/grafana/base | \ + kubectl -n grafana -f - ``` diff --git a/kustomize/grafana/base/azure-client-secret.yaml b/kustomize/grafana/base/azure-client-secret.yaml new file mode 100644 index 00000000..e707a00c --- /dev/null +++ b/kustomize/grafana/base/azure-client-secret.yaml @@ -0,0 +1,9 @@ +apiversion: v1 +data: + client_id: base64_encoded_client_id + client_secret: base64_encoded_client_secret +kind: secret +metadata: + name: azure-client + namespace: grafana +type: opaque diff --git a/kustomize/grafana/base/datasources.yaml b/kustomize/grafana/base/datasources.yaml new file mode 100644 index 00000000..6ae7e3a3 --- /dev/null +++ b/kustomize/grafana/base/datasources.yaml @@ -0,0 +1,14 @@ +datasources: + datasources.yaml: + apiversion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + url: http://kube-prometheus-stack-prometheus.prometheus.svc.cluster.local:9090 + isdefault: true + - name: loki + type: loki + access: proxy + url: http://loki-gateway.{{ $.Release.Namespace }}.svc.cluster.local:80 + editable: false diff --git a/kustomize/grafana/base/example-cert.pem b/kustomize/grafana/base/example-cert.pem new file mode 100644 
index 00000000..90e2af6b --- /dev/null +++ b/kustomize/grafana/base/example-cert.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0DCCARIGAWIBAGIBATANBGKQHKIG9W0BAQUFADB/MQSWCQYDVQQGEWJGUJET +MBEGA1UECAWKU29TZS1TDGF0ZTEOMAWGA1UEBWWFUGFYAXMXDTALBGNVBAOMBERP +BWKXDTALBGNVBASMBE5TQLUXEDAOBGNVBAMMB0RPBWKGQ0EXGZAZBGKQHKIG9W0B +CQEWDGRPBWLAZGLTAS5MCJAEFW0XNDAXMJGYMDM2NTVAFW0YNDAXMJYYMDM2NTVA +MFSXCZAJBGNVBAYTAKZSMRMWEQYDVQQIDAPTB21LLVN0YXRLMSEWHWYDVQQKDBHJ +BNRLCM5LDCBXAWRNAXRZIFB0ESBMDGQXFDASBGNVBAMMC3D3DY5KAW1PLMZYMIIB +IJANBGKQHKIG9W0BAQEFAAOCAQ8AMIIBCGKCAQEAVPNAPKLIKDVX98KW68LZ8PGA +RRCYERSNGQPJPIFMVJJE8LUCOXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWX +SXITRW99HBFAL1MDQYWCUKOEB9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P +1NCVW+6B/AAN9L1G2PQXGRDYC/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYB +AKJQETWWV6DFK/GRDOSED/6BW+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAU +ZKCHSRYC/WHVURX6O85D6QPZYWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWID +AQABO3SWETAJBGNVHRMEAJAAMCWGCWCGSAGG+EIBDQQFFH1PCGVUU1NMIEDLBMVY +YXRLZCBDZXJ0AWZPY2F0ZTADBGNVHQ4EFGQU+TUGFTYN+CXE1WXUQEA7X+YS3BGW +HWYDVR0JBBGWFOAUHMWQKBBRGP87HXFVWGPNLGGVR64WDQYJKOZIHVCNAQEFBQAD +GGEBAIEEMQQHEZEXZ4CKHE5UM9VCKZKJ5IV9TFS/A9CCQUEPZPLT7YVMEVBFNOC0 ++1ZYR4TXGI4+5MHGZHYCIVVHO4HKQYM+J+O5MWQINF1QOAHUO7CLD3WNA1SKCVUV +VEPIXC/1AHZRG+DPEEHT0MDFFOW13YDUC2FH6AQEDCEL4AV5PXQ2EYR8HR4ZKBC1 +FBTUQUSVA8NWSIYZQ16FYGVE+ANF6VXVUIZYVWDRPRV/KFVLNA3ZPNLMMXU98MVH +PXY3PKB8++6U4Y3VDK2NI2WYYLILS8YQBM4327IKMKDC2TIMS8U60CT47MKU7ADY +CBTV5RDKRLAYWM5YQLTIGLVCV7O= +-----END CERTIFICATE----- diff --git a/kustomize/grafana/base/example-key.pem b/kustomize/grafana/base/example-key.pem new file mode 100644 index 00000000..18e01dee --- /dev/null +++ b/kustomize/grafana/base/example-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEOWIBAAKCAQEAVPNAPKLIKDVX98KW68LZ8PGARRCYERSNGQPJPIFMVJJE8LUC +OXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWXSXITRW99HBFAL1MDQYWCUKOE +B9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P1NCVW+6B/AAN9L1G2PQXGRDY +C/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYBAKJQETWWV6DFK/GRDOSED/6B +W+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAUZKCHSRYC/WHVURX6O85D6QPZ +YWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWIDAQABAOIBAFML8CD9A5PMQLW3 +F9BTTQZ1SRL4FVP7CMHSXHVJSJEHWHHCKEE0OBKWTRSGKTSM1XLU5W8IITNHN0+1 +INR+78EB+RRGNGDAXH8DIODKEY+8/CEE8TFI3JYUTKDRLXMBWIKSOUVVIUMOQ3FX +OGQYWQ0Z2L/PVCWY/Y82FFQ3YSC5GAJSBBYSCRG14BQO44ULRELE4SDWS5HCJKYB +EI2B8COMUCQZSOTXG9NILN/JE2BO/I2HGSAWIBGCODBMS8K6TVSSRZMR3KJ5O6J+ +77LGWKH37BRVGBVYVBQ6NWPL0XLG7DUV+7LWEO5QQAPY6AXB/ZBCKQLQU6/EJOVE +YDG5JQECGYEA9KKFTZD/WEVAREA0DZFEJRU8VLNWOAGL7CJAODXQXOS4MCR5MPDT +KBWGFKLFFH/AYUNPBLK6BCJP1XK67B13ETUA3I9Q5T1WUZEOBIKKBLFM9DDQJT43 +UKZWJXBKFGSVFRYPTGZST719MZVCPCT2CZPJEGN3HLPT6FYW3EORNOECGYEAXIOU +JWXCOMUGAB7+OW2TR0PGEZBVVLEGDKAJ6TC/HOKM1A8R2U4HLTEJJCRLLTFW++4I +DDHE2DLER4Q7O58SFLPHWGPMLDEZN7WRLGR7VYFUV7VMAHJGUC3GV9AGNHWDLA2Q +GBG9/R9OVFL0DC7CGJGLEUTITCYC31BGT3YHV0MCGYEA4K3DG4L+RN4PXDPHVK9I +PA1JXAJHEIFEHNAW1D3VWKBSKVJMGVF+9U5VEV+OWRHN1QZPZV4SURI6M/8LK8RA +GR4UNM4AQK4K/QKY4G05LKRIK9EV2CGQSLQDRA7CJQ+JN3NB50QG6HFNFPAFN+J7 +7JUWLN08WFYV4ATPDD+9XQECGYBXIZKZFL+9IQKFOCONVWAZGO+DQ1N0L3J4ITIK +W56CKWXYJ88D4QB4EUU3YJ4UB4S9MIAW/ELEWKZIBWPUPFAN0DB7I6H3ZMP5ZL8Q +QS3NQCB9DULMU2/TU641ERUKAMIOKA1G9SNDKAZUWO+O6FDKIB1RGOBK9XNN8R4R +PSV+AQKBGB+CICEXR30VYCV5BNZN9EFLIXNKAEMJURYCXCRQNVRNUIUBVAO8+JAE +CDLYGS5RTGOLZIB0IVERQWSP3EI1ACGULTS0VQ9GFLQGAN1SAMS40C9KVNS1MLDU +LHIHYPJ8USCVT5SNWO2N+M+6ANH5TPWDQNEK6ZILH4TRBUZAIHGB +-----END RSA PRIVATE KEY----- diff --git a/kustomize/grafana/base/grafana-database.yaml b/kustomize/grafana/base/grafana-database.yaml new file mode 100644 
index 00000000..1accfcbf --- /dev/null +++ b/kustomize/grafana/base/grafana-database.yaml @@ -0,0 +1,51 @@ +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: Database +metadata: + name: grafana + namespace: openstack +spec: + # If you want the database to be created with a different name than the resource name + # name: data-custom + mariaDbRef: + name: mariadb-galera + characterSet: utf8 + collate: utf8_general_ci + requeueInterval: 30s + retryInterval: 5s +--- +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: User +metadata: + name: grafana + namespace: openstack +spec: + # If you want the user to be created with a different name than the resource name + # name: user-custom + mariaDbRef: + name: mariadb-galera + passwordSecretKeyRef: + name: grafana-db + key: password + # This field is immutable and defaults to 10 + maxUserConnections: 20 + host: "%" + requeueInterval: 30s + retryInterval: 5s +--- +apiVersion: mariadb.mmontes.io/v1alpha1 +kind: Grant +metadata: + name: grant + namespace: openstack +spec: + mariaDbRef: + name: mariadb-galera + privileges: + - "ALL" + database: "grafana" + table: "*" + username: grafana + grantOption: true + host: "%" + requeueInterval: 30s + retryInterval: 5s diff --git a/kustomize/grafana/base/grafana-values.yaml b/kustomize/grafana/base/grafana-values.yaml new file mode 100644 index 00000000..13b92f7a --- /dev/null +++ b/kustomize/grafana/base/grafana-values.yaml @@ -0,0 +1,58 @@ +#### EDIT THESE TWO VARIABLES WITH YOUR VALUES +custom_host: grafana.example.com # TODO: update this value to the FQDN of your grafana site +tenant_id: 122333 # TODO: update this value to use your Azure Tenant ID +#### + +ingress: + enabled: true + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + path: / + pathType: ImplementationSpecific + + hosts: + - "{{ .Values.custom_host }}" # Ref: custom_host variable above + tls: + - hosts: + - "{{ .Values.custom_host }}" # Ref: custom_host variable above + secretName: grafana-tls-public + + +extraSecretMounts: + - name: azure-client-secret-mount + secretName: azure-client + defaultMode: 0440 + mountPath: /etc/secrets/azure-client + readOnly: true +nodeSelector: + openstack-control-plane: enabled +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ .Values.custom_host }}" # Ref: custom_host variable above + root_url: "https://{{ .Values.custom_host }}" # Ref: custom_host variable above + auth.azuread: + name: Azure AD + enabled: true + allow_sign_up: true + auto_login: false + client_id: $__file{/etc/secrets/azure-client/client_id} + client_secret: $__file{/etc/secrets/azure-client/client_secret} + scopes: openid email profile + auth_url: "https://login.microsoftonline.com/{{ .Values.tenant_id }}/oauth2/v2.0/authorize" + token_url: "https://login.microsoftonline.com/{{ .Values.tenant_id }}/oauth2/v2.0/token" + allowed_organizations: "{{ .Values.tenant_id }}" + role_attribute_strict: false + allow_assign_grafana_admin: false + skip_org_role_sync: false + use_pkce: true diff --git a/kustomize/grafana/base/kustomization.yaml b/kustomize/grafana/base/kustomization.yaml new file mode 100644 index 00000000..f50c4088 --- /dev/null +++ b/kustomize/grafana/base/kustomization.yaml @@ -0,0 +1,13 @@ +resources: + - ns-grafana.yaml + - azure-client-secret.yaml + - grafana-database.yaml + +helmCharts: + - name: 
grafana + repo: https://grafana.github.io/helm-charts + releaseName: grafana + namespace: grafana + valuesFile: grafana-values.yaml + additionalValuesFiles: + - datasources.yaml diff --git a/kustomize/grafana/base/ns-grafana.yaml b/kustomize/grafana/base/ns-grafana.yaml new file mode 100644 index 00000000..d780fe65 --- /dev/null +++ b/kustomize/grafana/base/ns-grafana.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + kubernetes.io/metadata.name: grafana + name: grafana + name: grafana From 38ecbd319e7095d15e900e59ff0e6cac7834455f Mon Sep 17 00:00:00 2001 From: "phillip.toohill" Date: Wed, 20 Mar 2024 14:14:48 -0500 Subject: [PATCH 17/20] Updating monitoring docs with a getting started page (#168) --- docs/monitoring-getting-started.md | 51 ++++++++++++++++++++++++++ docs/prometheus-monitoring-overview.md | 27 ++++++++++++-- 2 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 docs/monitoring-getting-started.md diff --git a/docs/monitoring-getting-started.md b/docs/monitoring-getting-started.md new file mode 100644 index 00000000..182b1ef1 --- /dev/null +++ b/docs/monitoring-getting-started.md @@ -0,0 +1,51 @@ +# Getting started with genestack monitoring + +In order to begin monitoring your genestack deployment we first need to deploy the core prometheus components + +## Install the Prometheus stack + +Install [Prometheus](prometheus.md) which is part of the kube-prometheus-stack and includes: + +* Prometheus and the Prometheus operator to manage the Prometheus cluster deployment +* AlertManager which allows for alerting configurations to be set in order to notify various services like email or PagerDuty for specified alerting thresholds + +The [Prometheus](prometheus.md) kube-prometheus-stack will also deploy a couple core metric exporters as part of the stack, those include: + +* Node Exporter(Hardware metrics) +* Kube State Exporter(Kubernetes cluster metrics) + +## Install Grafana + +We can then deploy our visualization dashboard Grafana + +* [Install Grafana](grafana.md) + +Grafana is used to visualize various metrics provided by the monitoring system as well as alerts and logs, take a look at the [Grafana](https://grafana.com/) documentation for more information + +## Install the metric exporters + +Now let's deploy our exporters! + +* [Mysql Exporter](prometheus-mysql-exporter.md) +* [RabbitMQ Exporter](prometheus-rabbitmq-exporter.md) +* [Postgres Exporter](prometheus-postgres-exporter.md) +* [Memcached Exporter](prometheus-memcached-exporter.md) +* [Openstack Exporter](prometheus-openstack-metrics-exporter.md) + +## Next steps + +### Configure alert manager + +Configure the alert manager to send the specified alerts to slack as an example, see: [Slack Alerts](alertmanager-slack.md) + +... and more ... + +### Update alerting rules + +Within the genestack repo we can update our alerting rules via the alerting_rules.yaml to fit our needs + +View alerting_rules.yaml in: + +``` shell +less /opt/genestack/kustomize/prometheus/alerting_rules.yaml +``` diff --git a/docs/prometheus-monitoring-overview.md b/docs/prometheus-monitoring-overview.md index 3fbebdf8..4b7da4b6 100644 --- a/docs/prometheus-monitoring-overview.md +++ b/docs/prometheus-monitoring-overview.md @@ -1,8 +1,29 @@ # Prometheus Monitoring Overview -Genestack utilizes Prometheus for monitoring and metrics collection. To read more about Prometheus please take a look at the [upstream docs](https://prometheus.io). 
+Genestack utilizes Prometheus for monitoring, alerting and metrics collection. To read more about Prometheus please take a look at the [upstream docs](https://prometheus.io). -A high level visual of Prometheus and the various monitoring and alerting components inside Genestack +Components used to monitor and provide alerting and visualization mechanisms for genestack include: -![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png) +* Prometheus +* AlertManager +* Grafana + +Prometheus makes use of various metric exporters used to collect monitoring data related to specific services: + +* Node Exporter(Hardware metrics) +* Kube State Exporter(Kubernetes cluster metrics) +* Mysql Exporter(MariaDB/Galera metrics) +* RabbitMQ Exporter(RabbitMQ queue metrics) +* Postgres Exporter(Postgresql metrics) +* Memcached Exporter(Memcached metrics) +* Openstack Exporter(Metrics from various Openstack products) + +Below is a high level visual of Prometheus and the various monitoring and alerting components within genestack + +![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png){ align=center : style="filter:drop-shadow(#3c3c3c 0.5rem 0.5rem 10px);" } + + +### Getting started with genestack monitoring + +To get started using monitoring within the genestack ecosystem begin with the [getting started](monitoring-getting-started.md) page From d49012c818875fabd68dfa0ecb36936e8830b00f Mon Sep 17 00:00:00 2001 From: "phillip.toohill" Date: Wed, 20 Mar 2024 17:12:44 -0500 Subject: [PATCH 18/20] Updating monitoring overview with figure for diagram (#169) --- .gitignore | 2 +- docs/prometheus-monitoring-overview.md | 8 ++++---- mkdocs.yml | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index eeb46464..93ab9ae2 100644 --- a/.gitignore +++ b/.gitignore @@ -103,7 +103,7 @@ target/ .env # virtualenv -venv/ +*venv/ ENV/ # molecule diff --git a/docs/prometheus-monitoring-overview.md b/docs/prometheus-monitoring-overview.md index 4b7da4b6..4d0ad6ed 100644 --- a/docs/prometheus-monitoring-overview.md +++ b/docs/prometheus-monitoring-overview.md @@ -19,10 +19,10 @@ Prometheus makes use of various metric exporters used to collect monitoring data * Memcached Exporter(Memcached metrics) * Openstack Exporter(Metrics from various Openstack products) -Below is a high level visual of Prometheus and the various monitoring and alerting components within genestack - -![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png){ align=center : style="filter:drop-shadow(#3c3c3c 0.5rem 0.5rem 10px);" } - +

+<figure markdown>
+  ![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png){ style="filter:drop-shadow(#3c3c3c 0.5rem 0.5rem 10px);" }
+  <figcaption>high level visual of Prometheus and the various monitoring and alerting components within genestack</figcaption>
+</figure>
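+
+Once the core stack is up, a quick way to confirm the exporters listed above are actually being scraped is to query the Prometheus targets API. This is only an illustrative check: the service name and namespace below assume the default kube-prometheus-stack install described in these docs and may differ in your environment.
+
+``` shell
+kubectl -n prometheus port-forward svc/kube-prometheus-stack-prometheus 9090:9090 &
+# List each scrape job and its health (requires curl and jq on the workstation)
+curl -s http://localhost:9090/api/v1/targets | jq -r '.data.activeTargets[] | "\(.labels.job): \(.health)"' | sort -u
+```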
### Getting started with genestack monitoring diff --git a/mkdocs.yml b/mkdocs.yml index 76b14ad7..eb07e292 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -187,6 +187,7 @@ nav: - Ceilometer: openstack-ceilometer.md - Monitoring: - Monitoring Overview: prometheus-monitoring-overview.md + - Getting Started: monitoring-getting-started.md - Grafana: grafana.md - MySQL Exporter: prometheus-mysql-exporter.md - RabbitMQ Exporter: prometheus-rabbitmq-exporter.md From 1635ade806486d584a344034e94b6c53e513f8d9 Mon Sep 17 00:00:00 2001 From: Pratik Bandarkar Date: Thu, 21 Mar 2024 19:24:37 +0000 Subject: [PATCH 19/20] Add Sealed Secrets installation guide and usage example (#152) Added installation instructions for Sealed Secrets using Kustomize along with example --- .../workflows/kustomize-sealed-secrets.yaml | 37 ++ docs/sealed-secrets.md | 76 +++ .../sealed-secrets/base/kustomization.yaml | 12 + kustomize/sealed-secrets/base/namespace.yaml | 8 + kustomize/sealed-secrets/base/values.yaml | 486 ++++++++++++++++++ mkdocs.yml | 1 + 6 files changed, 620 insertions(+) create mode 100644 .github/workflows/kustomize-sealed-secrets.yaml create mode 100644 docs/sealed-secrets.md create mode 100644 kustomize/sealed-secrets/base/kustomization.yaml create mode 100644 kustomize/sealed-secrets/base/namespace.yaml create mode 100644 kustomize/sealed-secrets/base/values.yaml diff --git a/.github/workflows/kustomize-sealed-secrets.yaml b/.github/workflows/kustomize-sealed-secrets.yaml new file mode 100644 index 00000000..1f5e4157 --- /dev/null +++ b/.github/workflows/kustomize-sealed-secrets.yaml @@ -0,0 +1,37 @@ +name: Kustomize GitHub Actions for sealed-secrets + +on: + pull_request: + paths: + - kustomize/sealed-secrets/** + - .github/workflows/kustomize-sealed-secrets.yaml +jobs: + kustomize: + strategy: + matrix: + overlays: + - base + name: Kustomize + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Kustomize Install + working-directory: /usr/local/bin/ + run: | + if [ ! -f /usr/local/bin/kustomize ]; then + curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | sudo bash + fi + - name: Run Kustomize Build + run: | + kustomize build kustomize/sealed-secrets/${{ matrix.overlays }} --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml + - name: Return Kustomize Build + uses: actions/upload-artifact@v2 + with: + name: kustomize-sealed-secrets-artifact-${{ matrix.overlays }} + path: /tmp/rendered.yaml diff --git a/docs/sealed-secrets.md b/docs/sealed-secrets.md new file mode 100644 index 00000000..e46d4dae --- /dev/null +++ b/docs/sealed-secrets.md @@ -0,0 +1,76 @@ +# Sealed Secrets Introduction and Installation Guide + + +Sealed Secrets is a Kubernetes-native solution for securely storing and managing sensitive information within Kubernetes Secrets. It ensures secure secret management by encrypting Kubernetes Secrets and storing them as SealedSecret resources, which can only be decrypted by the cluster itself. + +Sealed Secrets utilizes public-key cryptography to encrypt secrets, enabling safe storage in your version control system. + + +## Installation + +``` shell +cd kustomize/sealed-secrets/base +``` + +- Modify the `values.yaml` file with your desired configurations. Refer to the sample configuration in this directory, already updated for installation. 
+ +``` shell +vi values.yaml +``` + +- Perform the installation: + +``` shell +kubectl kustomize . --enable-helm | kubectl apply -f - +``` + +!!! note + Ensure to take a backup of the `sealed-secrets-keyxxxx` Kubernetes Secret from the sealed-secrets namespace, as it will be required for the restoration process if needed. + +``` +kubectl get secret -n sealed-secrets -l sealedsecrets.bitnami.com/sealed-secrets-key=active -o yaml > sealed-secrets-key.yaml +``` + +## Usage Example: +In this example, we will use Sealed Secrets to encrypt a Grafana certificate from Kubernetes Secret yaml file. + +### Encrypting Kubernetes Secret: +- Kubernetes Secret yaml file containing Grafana certificate: +``` +# cat grafana-cert.yaml +apiVersion: v1 +data: + ca.crt: + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjVENDQVJhZ0F3SUJBZ0lRYjBYbHp2d3JIWTd0MjNBREJ5Y2NnekFLQmdncWhrak9QUVFEQWpBWU1SWXcKRkFZRFZRUURFdzF5WVdOcmMzQmhZMlV1WTI5dE1CNFhEVEkwTURJeU5ERXdOVFExT0ZvWERUTTBNREl5TVRFdwpOVFExT0Zvd0dERVdNQlFHQTFVRUF4TU5jbUZqYTNOd1lXTmxMbU52YlRCWk1CTUdCeXFHU000OUFnRUdDQ3FHClNNNDlBd0VIQTBJQUJPd0owMU1ZTWw4MUNyV1dMODlQQkhvVG5telZCT2xRMkdMMDFTd2JjYXZQVmRCWnVHamIKeFlwR3VKVDd1UG5xdVp4eFZ4djhUSFlPcVVVL1ZYT2ZtdkNqUWpCQU1BNEdBMVVkRHdFQi93UUVBd0lDcERBUApCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUU5weXZnNk1CSWFnZENuOVR1ejZ3SkZDMVIvekFLCkJnZ3Foa2pPUFFRREFnTkpBREJHQWlFQTY5T25ScUZ5SHZQbjJkWFZ6YjBTVFRZY2UxUUZGUEphWXFVYnQrc2kKdG13Q0lRRDE2ODV0UDBKcnZRRnB6NVlPNFdYQ2xEQWxabTgxUWRwN1lWY0FJS1RhbWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUVENDQWZLZ0F3SUJBZ0lSQUxieTRuVUJoTWlvYkVTS01yVmwrbEl3Q2dZSUtvWkl6ajBFQXdJd0dERVcKTUJRR0ExVUVBeE1OY21GamEzTndZV05sTG1OdmJUQWVGdzB5TkRBek1UVXhNakk0TUROYUZ3MHlPVEF6TVRReApNakk0TUROYU1BQXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEUStvcVhlUVZWCmRSWkFWclM2ekZwMDlONXpDWUJRcS9HRjNNS1NyWnNkK3VNVlFXakIwcXlJcWJRdm9kL0N0NFhMdWx3a3UyWkIKQlg1MFN4NHJMVGhKQ3ExY2VIQ3lnRUZRa1gyekl6dlBkaCtTcFhWUnhMdzhHZW1ramZ5R3VXeVdydkVEa1cxKwpaM0dYOFc0ZzRZVkwyUEhSLzBIOWxSaVVhK2lYMmM0ZkJhVWoyTUQ3bkF6eWRKaEpneU5rQVZqUHFkRGpGay90CmdIS3pDTGhRTjd0d083ZzluU1UwdTJ1aWI4Z0FZeng0aHl1SWtwR3dCL3JNQkFWb0pxV3Y5eFFkVWd2S2w4a0EKbDFydngwaFlveWZETUprWVQ3SkFYZExEWTJRTUNyY0Y3d0poQUMzYThhYXJqRlUwWXFiQ0Z4TCtvRGw3OGxDbwp2akt2NG0wUmliU1ZBZ01CQUFHamFqQm9NQTRHQTFVZER3RUIvd1FFQXdJRm9EQU1CZ05WSFJNQkFmOEVBakFBCk1COEdBMVVkSXdRWU1CYUFGQTJuSytEb3dFaHFCMEtmMU83UHJBa1VMVkgvTUNjR0ExVWRFUUVCL3dRZE1CdUMKR1dkeVlXWmhibUV0YkdGaUxtUmxiVzh1YldzNGN5NXVaWFF3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQU9lRwp4d1l0S1ZUTjVMcmpwbGR6YlVOLzQ3NnFqM0t4NXdZcGlCL0VaalY5QWlFQXRHU3ZJZlJ2R0JGY1lqaWRyNFl1Ckw1S0Rwd21rZkt0eFhuNi9xamF0eG1jPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.key: + 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMFBxS2wza0ZWWFVXUUZhMHVzeGFkUFRlY3dtQVVLdnhoZHpDa3EyYkhmcmpGVUZvCndkS3NpS20wTDZIZndyZUZ5N3BjSkx0bVFRVitkRXNlS3kwNFNRcXRYSGh3c29CQlVKRjlzeU03ejNZZmtxVjEKVWNTOFBCbnBwSTM4aHJsc2xxN3hBNUZ0Zm1keGwvRnVJT0dGUzlqeDBmOUIvWlVZbEd2b2w5bk9Id1dsSTlqQQorNXdNOG5TWVNZTWpaQUZZejZuUTR4WlA3WUJ5c3dpNFVEZTdjRHU0UFowbE5MdHJvbS9JQUdNOGVJY3JpSktSCnNBZjZ6QVFGYUNhbHIvY1VIVklMeXBmSkFKZGE3OGRJV0tNbnd6Q1pHRSt5UUYzU3cyTmtEQXEzQmU4Q1lRQXQKMnZHbXE0eFZOR0ttd2hjUy9xQTVlL0pRcUw0eXIrSnRFWW0wbFFJREFRQUJBb0lCQVFDR2x0VnJlS1hXdy9Idwp2ZWJuNTNUYW5sb2wvSmlIWERYUTRMenZlcC9NVHlpeEo4OHdCVjdaSlhMR3VwcEI3YkJkNVVneTMvNmJJYzZ2ClZ6RzIzUWpEQWYxazhLeWtTYlhIRGV6RzBvcFNzdURpc1cwOW5GY2UzaEY3eVhZNXpuSUJHZXBmUWVvaTNyeHAKL3pQT09YQi95TmoxUmxCWjRReFRpcXZpSUlSL3RSZmNQcFp2RWFRRHo5RDBwcm5VTG5raXdqZ1FsUVhnWXdITwpFYjRBZTlwaWwzZ3plNnVoeGxOWEc3bE1nYjFoOHZFa0RNOURJK0tqd25tYjF3eEZCSkZEQ2E4dm15ZDZZTThRCnU1bU5JbVc3bmh1bTA3akRid0tXSDgySE5kTWEwT2g4T0RCWENSSkVhMTZ2YXd0NVNCWjJLcVdlbmpaTlUycmwKTzJ2UmRZUUJBb0dCQVAxUzhEeTVWRkVQUHB4RCtLZHJzWGlVcER6Rzl2VGZmS3ZLQ2NBNExpVEJNYTdEdlRNTwpMeFRJaldMekhmZUFPbXBzVngrK3U4S1kzd2txbTBDcWpabzZ3eVpXcWZhZkJ6bUluK3p3Zm9tQmlIazJwZ2tCCjlTdU95VW9Bb0djYSt6TUtyZXpJRjVrc2FaUmlJbERsL2dheWFlVUZyWGhLZUJTbDF0Q3lOVTlOQW9HQkFOTXYKcmkxcllLZkVPeGxOTlpTbVczQzRiZ2RJZlNoRXNYaVcrUkxFYkdqamgwRWN5Vy92SCtrMU5TdU5aZERpUk9BRwpVQmhmT29YSnVYbzJkTlRXdXFuSE9QL2pxUG1tYWRhU3dpejNtUFNqRUppU3hUbFBQMGEyb0Jpa3VTVlAybDFVCkxxa0MrZ1ZEWHhoaXlXUXlKMUNnY0dNb0IyTVI4R0RaZkVXSm9lWnBBb0dCQU9EdjBWUUtPRjFWelFHU3RXdHMKREFVRzc2THNCUU5Bb3dJamYyOElNNmo5UnpGb3EwcDNjTVRpby9EVjhha0FXbDUvWHdsWUluN1RvVkFSWGhRWQpuVzN5ZWJCRVNkMHNMbzBlek9ybVRXV3ArRld4ZWRNTHd2aHZiRHJpdll0d0FOZTh4dDAyZXdYTzB0MG9HbEo5Ck5vZ1p5ai9MUDlKTlJiMEgyT3d0SVhzTkFvR0FNaXRrbEhPcTNaQVhmaFpDZ1ZMWDdEcFVJVFRPVHMrcTNYdjQKSmNZMS91RDJrN2hUL2x4dlYwYUZvQmdTTlFKYjNHQ0RqSmFxMzNlaHNXL1laMnV2b24rcWdkZkNuN1F4OW9DYwowblByaVVwbnVlYzhKVFkzVVFRM21rTWZuTWFRbUpWVUZHQ1pwc0J2aWVxRjcyQ2V5RitrODFsaUQ5NEdIZXZzCnd0UkVldWtDZ1lFQSt1ZExMZllCRitFaDZIVldvT3NXU2lqZCtrTnh4ajhaK2VSMWhOaWxtN1I5RlNkVzJHVEoKY2lvMlIrSDhWU0xudnFjZ29oWXNxZ0N0VXViTnpNbjdlbEt4RkNOOHRaS1lUYnhHcU5IUHJ4WE43M3RQNy83WAp2MWF4UXQvbm5lcDEvaVYzODVBcUZLdGZ6UU9Ua25sdGJBcmxyZzRvRFk4d0NtUmcwTi9aLzJFPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + annotations: + cert-manager.io/alt-names: grafana-lab.demo.mk8s.net + name: grafana + namespace: rackspace-system +type: kubernetes.io/tls +``` +- Download [kubeseal](https://github.com/bitnami-labs/sealed-secrets/releases) binary. +- Use `kubeseal` for the Kuberntes Secret entryption: +``` shell +kubeseal --scope cluster-wide --allow-empty-data -o yaml --controller-namespace rackspace-system < ~/grafana-cert.yaml > encrypted_grafana-cert.yaml +cat encrypted_grafana-cert.yaml +``` +For more options around `kubeseal` please check help page. + +- Upload the encrypted Sealed Secret resource(`encrypted_grafana-cert.yaml`) to your version control system. It can only be decrypted using the secret created during the Sealed Secrets installation. 
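+
+For illustration, the generated `encrypted_grafana-cert.yaml` is a SealedSecret resource shaped roughly like the sketch below; the `encryptedData` values here are placeholders standing in for the real ciphertext that `kubeseal` produces:
+
+``` yaml
+apiVersion: bitnami.com/v1alpha1
+kind: SealedSecret
+metadata:
+  name: grafana
+  namespace: rackspace-system
+  annotations:
+    # added automatically because the secret was sealed with --scope cluster-wide
+    sealedsecrets.bitnami.com/cluster-wide: "true"
+spec:
+  encryptedData:
+    ca.crt: AgB3...placeholder...
+    tls.crt: AgC5...placeholder...
+    tls.key: AgA9...placeholder...
+  template:
+    metadata:
+      name: grafana
+      namespace: rackspace-system
+    type: kubernetes.io/tls
+```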
+ +### Deploying Kubernetes Secret from Sealed Secret Resource: +- Apply sealed-secret resource(`encrypted_grafana-cert.yaml`): +```shell +kubectl apply -f encrypted_grafana-cert.yaml +``` +- Verify that the Sealed Secret has been created and the Kubernetes Secret has been decrypted: +```shell +kubectl get sealedsecret/grafana -n rackspace-system +kubectl get secret grafana -n rackspace-system +``` diff --git a/kustomize/sealed-secrets/base/kustomization.yaml b/kustomize/sealed-secrets/base/kustomization.yaml new file mode 100644 index 00000000..d8033add --- /dev/null +++ b/kustomize/sealed-secrets/base/kustomization.yaml @@ -0,0 +1,12 @@ +resources: + - './namespace.yaml' +namespace: sealed-secrets +helmGlobals: + chartHome: ../charts/ +helmCharts: +- name: sealed-secrets + includeCRDs: true + releaseName: sealed-secrets + valuesFile: values.yaml + version: 2.14.2 + repo: https://bitnami-labs.github.io/sealed-secrets diff --git a/kustomize/sealed-secrets/base/namespace.yaml b/kustomize/sealed-secrets/base/namespace.yaml new file mode 100644 index 00000000..100ff1eb --- /dev/null +++ b/kustomize/sealed-secrets/base/namespace.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + kubernetes.io/metadata.name: sealed-secrets + name: sealed-secrets + name: sealed-secrets diff --git a/kustomize/sealed-secrets/base/values.yaml b/kustomize/sealed-secrets/base/values.yaml new file mode 100644 index 00000000..a4172609 --- /dev/null +++ b/kustomize/sealed-secrets/base/values.yaml @@ -0,0 +1,486 @@ +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override sealed-secrets.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override sealed-secrets.fullname +## +fullnameOverride: "sealed-secrets-controller" +## @param namespace Namespace where to deploy the Sealed Secrets controller +## +namespace: "" + +## @param extraDeploy [array] Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param commonAnnotations [object] Annotations to add to all deployed resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +commonAnnotations: {} + +## @param commonLabels [object] Labels to add to all deployed resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +commonLabels: {} + +## @section Sealed Secrets Parameters + +## Sealed Secrets image +## ref: https://hub.docker.com/r/bitnami/sealed-secrets-controller/tags +## @param image.registry Sealed Secrets image registry +## @param image.repository Sealed Secrets image repository +## @param image.tag Sealed Secrets image tag (immutable tags are recommended) +## @param image.pullPolicy Sealed Secrets image pull policy +## @param image.pullSecrets [array] Sealed Secrets image pull secrets +## +image: + registry: docker.io + repository: bitnami/sealed-secrets-controller + tag: 0.25.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] +## @param revisionHistoryLimit Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) +## e.g: +revisionHistoryLimit: "" +## @param createController Specifies whether the Sealed Secrets controller should be created +## +createController: true +## @param secretName The name of an existing TLS secret containing the key used to encrypt secrets +## +secretName: "sealed-secrets-key" +## @param updateStatus Specifies whether the Sealed Secrets controller should update the status subresource +## +updateStatus: true +## @param skipRecreate Specifies whether the Sealed Secrets controller should skip recreating removed secrets +## Setting it to true allows to optionally restore backward compatibility in low priviledge +## environments when old versions of the controller did not require watch permissions on secrets +## for secret re-creation. +## +skipRecreate: false +## @param keyrenewperiod Specifies key renewal period. Default 30 days +## e.g +## keyrenewperiod: "720h30m" +## +keyrenewperiod: "0" +## @param rateLimit Number of allowed sustained request per second for verify endpoint +## +rateLimit: "" +## @param rateLimitBurst Number of requests allowed to exceed the rate limit per second for verify endpoint +## +rateLimitBurst: "" +## @param additionalNamespaces List of namespaces used to manage the Sealed Secrets +## +additionalNamespaces: [] +## @param privateKeyAnnotations Map of annotations to be set on the sealing keypairs +## +privateKeyAnnotations: {} +## @param privateKeyLabels Map of labels to be set on the sealing keypairs +## +privateKeyLabels: {} +## @param logInfoStdout Specifies whether the Sealed Secrets controller will log info to stdout +## +logInfoStdout: false +## @param command Override default container command +## +command: [] +## @param args Override default container args +## +args: [] +## Configure extra options for Sealed Secret containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Sealed Secret containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Sealed Secret containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## @param 
startupProbe.enabled Enable startupProbe on Sealed Secret containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## Sealed Secret resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits [object] The resources limits for the Sealed Secret containers +## @param resources.requests [object] The requested resources for the Sealed Secret containers +## +resources: + limits: {} + requests: {} +## Configure Pods Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled Sealed Secret pods' Security Context +## @param podSecurityContext.fsGroup Set Sealed Secret pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 65534 +## Configure Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param containerSecurityContext.enabled Enabled Sealed Secret containers' Security Context +## @param containerSecurityContext.readOnlyRootFilesystem Whether the Sealed Secret container has a read-only root filesystem +## @param containerSecurityContext.runAsNonRoot Indicates that the Sealed Secret container must run as a non-root user +## @param containerSecurityContext.runAsUser Set Sealed Secret containers' Security Context runAsUser +## @extra containerSecurityContext.capabilities Adds and removes POSIX capabilities from running containers (see `values.yaml`) +## @skip containerSecurityContext.capabilities.drop +## +containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1001 + capabilities: + drop: + - ALL + +## @param podLabels [object] Extra labels for Sealed Secret pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations [object] Annotations for Sealed Secret pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param priorityClassName Sealed Secret pods' priorityClassName +## +priorityClassName: "" +## @param runtimeClassName Sealed Secret pods' runtimeClassName +## +runtimeClassName: "" +## @param affinity [object] Affinity for Sealed Secret pods assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} +## @param nodeSelector [object] Node labels for Sealed Secret pods assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations [array] Tolerations for Sealed Secret pods 
assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param additionalVolumes [object] Extra Volumes for the Sealed Secrets Controller Deployment +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +additionalVolumes: [] +## @param additionalVolumeMounts [object] Extra volumeMounts for the Sealed Secrets Controller container +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +additionalVolumeMounts: [] +## @param hostNetwork Sealed Secrets pods' hostNetwork +hostNetwork: false +## @param dnsPolicy Sealed Secrets pods' dnsPolicy +dnsPolicy: "" + +## @section Traffic Exposure Parameters + +## Sealed Secret service parameters +## +service: + ## @param service.type Sealed Secret service type + ## + type: ClusterIP + ## @param service.port Sealed Secret service HTTP port + ## + port: 8080 + ## @param service.nodePort Node port for HTTP + ## Specify the nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePort: "" + ## @param service.annotations [object] Additional custom annotations for Sealed Secret service + ## + annotations: {} +## Sealed Secret ingress parameters +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress record generation for Sealed Secret + ## + enabled: false + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.hostname Default host for the ingress record + ## + hostname: sealed-secrets.local + ## @param ingress.path Default path for the ingress record + ## + path: /v1/cert.pem + ## @param ingress.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Relay on cert-manager to create it by setting the corresponding annotations + ## - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts [array] An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: sealed-secrets.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths [array] An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls [array] TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - sealed-secrets.local + ## secretName: sealed-secrets.local-tls + ## + extraTls: [] + ## @param ingress.secrets [array] Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: sealed-secrets.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## NetworkPolicy Egress configuration + ## + egress: + ## @param networkPolicy.egress.enabled Specifies wheter a egress is set in the NetworkPolicy + ## + enabled: false + ## @param networkPolicy.egress.kubeapiCidr Specifies the kubeapiCidr, which is the only egress allowed. If not set, kubeapiCidr will be found using Helm lookup + ## + kubeapiCidr: "" + ## @param networkPolicy.egress.kubeapiPort Specifies the kubeapiPort, which is the only egress allowed. 
If not set, kubeapiPort will be found using Helm lookup + ## + kubeapiPort: "" + +## @section Other Parameters + +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.annotations [object] Annotations for Sealed Secret service account + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: {} + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.labels Extra labels to be added to the ServiceAccount + ## + labels: {} + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the sealed-secrets.fullname template + ## + name: "" +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: true + ## @param rbac.clusterRole Specifies whether the Cluster Role resource should be created + ## + clusterRole: true + ## @param rbac.clusterRoleName Specifies the name for the Cluster Role resource + ## + clusterRoleName: "secrets-unsealer" + ## @param rbac.namespacedRoles Specifies whether the namespaced Roles should be created (in each of the specified additionalNamespaces) + ## + namespacedRoles: false + ## @param rbac.namespacedRolesName Specifies the name for the namesapced Role resource + ## + namespacedRolesName: "secrets-unsealer" + ## @param rbac.labels Extra labels to be added to RBAC resources + ## + labels: {} + ## @param rbac.pspEnabled PodSecurityPolicy + ## + pspEnabled: false + +## @section Metrics parameters + +metrics: + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Specify if a ServiceMonitor will be deployed for Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace where Prometheus Operator is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor + ## + annotations: {} + ## @param metrics.serviceMonitor.interval How frequently to scrape metrics + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.honorLabels Specify if ServiceMonitor endPoints will honor labels + ## + honorLabels: true + ## @param metrics.serviceMonitor.metricRelabelings [array] Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.relabelings [array] Specify general relabeling + ## + relabelings: [] + ## Grafana dashboards configuration + ## + dashboards: + ## @param metrics.dashboards.create Specifies whether a ConfigMap with a Grafana dashboard configuration should be created + ## ref https://github.com/helm/charts/tree/master/stable/grafana#configuration + ## + create: false + ## @param metrics.dashboards.labels Extra labels to be added to the Grafana dashboard ConfigMap + ## + labels: {} + ## @param metrics.dashboards.annotations Annotations to be added to the Grafana dashboard ConfigMap + ## + annotations: {} + ## @param metrics.dashboards.namespace Namespace where Grafana dashboard ConfigMap is deployed + ## + namespace: "" + + ## Sealed Secret Metrics service parameters + ## + service: + ## @param metrics.service.type 
Sealed Secret Metrics service type + ## + type: ClusterIP + ## @param metrics.service.port Sealed Secret service Metrics HTTP port + ## + port: 8081 + ## @param metrics.service.nodePort Node port for HTTP + ## Specify the nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePort: "" + ## @param metrics.service.annotations [object] Additional custom annotations for Sealed Secret Metrics service + ## + annotations: {} + +## @section PodDisruptionBudget Parameters + +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable The minimum number of pods (non number to omit) + ## + minAvailable: 1 + ## @param pdb.maxUnavailable The maximum number of unavailable pods (non number to omit) + ## + maxUnavailable: "" diff --git a/mkdocs.yml b/mkdocs.yml index eb07e292..83c0323a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -153,6 +153,7 @@ nav: - Secrets: - vault.md - Vault Operator: vault-secrets-operator.md + - Sealed Secrets: sealed-secrets.md - Infrastructure: - infrastructure-overview.md - Namespace: infrastructure-namespace.md From 9013dab2d381e7e0cc482eb1d3a4fb5e471d91ad Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Thu, 21 Mar 2024 19:53:53 -0500 Subject: [PATCH 20/20] feat: introduce maxscale (#166) * feat: introduce maxscale With the release of the mariadb operator v0.25.0 maxscale was introduced to resolve issues with multi-master deployments, enhance scale, and make better use of nodes in the environment. This change creates the maxscale resources and converts our standard deployment systems to use maxscale as the point of ingress. Specific feature we're interested in > point to an existing MaxScale instance. Doing this will delegate tasks such as primary failover to MaxScale. Docs: https://github.com/mariadb-operator/mariadb-operator/blob/main/docs/MAXSCALE.md Related: https://github.com/mariadb-operator/mariadb-operator/releases/tag/v0.0.26 Signed-off-by: Kevin Carter * fix: add annotations to reserve resources This change resolves issue #165. The change adds the helm annotations needed to permit helm to retain resouces even in the event of an uninstall. 
Resolves: https://github.com/rackerlabs/genestack/issues/165 Signed-off-by: Kevin Carter --------- Signed-off-by: Kevin Carter --- docs/infrastructure-mariadb-connect.md | 6 +- docs/infrastructure-mariadb.md | 16 +++ docs/openstack-skyline.md | 2 +- .../cinder/cinder-helm-overrides.yaml | 2 +- .../glance/glance-helm-overrides.yaml | 2 +- .../gnocchi/gnocchi-helm-overrides.yaml | 2 +- helm-configs/heat/heat-helm-overrides.yaml | 2 +- .../horizon/horizon-helm-overrides.yaml | 2 +- .../keystone/keystone-helm-overrides.yaml | 2 +- .../neutron/neutron-helm-overrides.yaml | 2 +- helm-configs/nova/nova-helm-overrides.yaml | 6 +- .../octavia/octavia-helm-overrides.yaml | 2 +- .../placement/placement-helm-overrides.yaml | 2 +- .../cinder/base/cinder-mariadb-database.yaml | 6 + .../cinder/base/cinder-rabbitmq-queue.yaml | 8 ++ .../glance/base/glance-mariadb-database.yaml | 6 + .../glance/base/glance-rabbitmq-queue.yaml | 8 ++ kustomize/grafana/base/grafana-database.yaml | 14 +- .../heat/base/heat-mariadb-database.yaml | 6 + .../base/horizon-mariadb-database.yaml | 6 + .../base/keystone-mariadb-database.yaml | 6 + .../base/keystone-rabbitmq-queue.yaml | 8 ++ .../mariadb-cluster/aio/kustomization.yaml | 7 + .../mariadb-cluster/base/kustomization.yaml | 1 + .../mariadb-cluster/base/mariadb-galera.yaml | 19 +-- .../base/mariadb-maxscale.yaml | 132 ++++++++++++++++++ .../base/neutron-mariadb-database.yaml | 6 + .../neutron/base/neutron-rabbitmq-queue.yaml | 8 ++ .../nova/base/nova-mariadb-database.yaml | 14 ++ kustomize/nova/base/nova-rabbitmq-queue.yaml | 8 ++ kustomize/octavia/base/octavia-agent.yaml | 4 +- .../base/octavia-mariadb-database.yaml | 6 + .../octavia/base/octavia-rabbitmq-queue.yaml | 8 ++ .../base/placement-mariadb-database.yaml | 6 + 34 files changed, 297 insertions(+), 38 deletions(-) create mode 100644 kustomize/mariadb-cluster/base/mariadb-maxscale.yaml diff --git a/docs/infrastructure-mariadb-connect.md b/docs/infrastructure-mariadb-connect.md index 76f2c9e8..7f00c951 100644 --- a/docs/infrastructure-mariadb-connect.md +++ b/docs/infrastructure-mariadb-connect.md @@ -3,9 +3,9 @@ Sometimes an operator may need to connect to the database to troubleshoot things or otherwise make modifications to the databases in place. The following command can be used to connect to the database from a node within the cluster. ``` shell -mysql -h $(kubectl -n openstack get service mariadb-galera-primary -o jsonpath='{.spec.clusterIP}') \ - -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \ - -u root +mysql -h $(kubectl -n openstack get service maxscale-galera -o jsonpath='{.spec.clusterIP}') \ + -p$(kubectl --namespace openstack get secret maxscale -o jsonpath='{.data.password}' | base64 -d) \ + -u maxscale-galera-client ``` !!! info diff --git a/docs/infrastructure-mariadb.md b/docs/infrastructure-mariadb.md index 0f9bfbcb..28aa5539 100644 --- a/docs/infrastructure-mariadb.md +++ b/docs/infrastructure-mariadb.md @@ -42,3 +42,19 @@ kubectl --namespace openstack apply -k /opt/genestack/kustomize/mariadb-cluster/ ``` shell kubectl --namespace openstack get mariadbs -w ``` + +## MaxScale + +Within the deployment the OpenStack services use MaxScale for loadlancing and greater reliability. While the MaxScale ecosystem is a good one, there are some limitations that you should be aware of. 
It is recommended that you review the [MaxScale reference documentation](https://mariadb.com/kb/en/mariadb-maxscale-2302-limitations-and-known-issues-within-mariadb-maxscale) for more about all of the known limitations and potential workarounds available. + +``` mermaid +flowchart TD + A[Connection] ---B{MaxScale} + B ---|ro| C[ES-0] + B ---|rw| D[ES-1] ---|sync| E & C + B ---|ro| E[ES-2] +``` + +### MaxScale GUI + +The MaxScale deployment has access to a built in GUI that can be exposed for further debuging and visibility into the performance of the MariDB backend. For more information on accessing the GUI please refer to the MaxScale documentation that can be found [here](https://mariadb.com/resources/blog/getting-started-with-the-mariadb-maxscale-gui). diff --git a/docs/openstack-skyline.md b/docs/openstack-skyline.md index 761dcd6e..588bff41 100644 --- a/docs/openstack-skyline.md +++ b/docs/openstack-skyline.md @@ -17,7 +17,7 @@ kubectl --namespace openstack \ --from-literal=service-domain="service" \ --from-literal=service-project="service" \ --from-literal=service-project-domain="service" \ - --from-literal=db-endpoint="mariadb-galera-primary.openstack.svc.cluster.local" \ + --from-literal=db-endpoint="maxscale-galera.openstack.svc.cluster.local" \ --from-literal=db-name="skyline" \ --from-literal=db-username="skyline" \ --from-literal=db-password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" \ diff --git a/helm-configs/cinder/cinder-helm-overrides.yaml b/helm-configs/cinder/cinder-helm-overrides.yaml index 9d15d034..136fa62c 100644 --- a/helm-configs/cinder/cinder-helm-overrides.yaml +++ b/helm-configs/cinder/cinder-helm-overrides.yaml @@ -1320,7 +1320,7 @@ endpoints: username: cinder password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /cinder diff --git a/helm-configs/glance/glance-helm-overrides.yaml b/helm-configs/glance/glance-helm-overrides.yaml index b3b19a86..6a5ad87c 100644 --- a/helm-configs/glance/glance-helm-overrides.yaml +++ b/helm-configs/glance/glance-helm-overrides.yaml @@ -589,7 +589,7 @@ endpoints: username: glance password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /glance diff --git a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml index db1c37bb..9110f4e9 100644 --- a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml +++ b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml @@ -622,7 +622,7 @@ endpoints: username: gnocchi password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /gnocchi diff --git a/helm-configs/heat/heat-helm-overrides.yaml b/helm-configs/heat/heat-helm-overrides.yaml index b27640c1..138eb76e 100644 --- a/helm-configs/heat/heat-helm-overrides.yaml +++ b/helm-configs/heat/heat-helm-overrides.yaml @@ -859,7 +859,7 @@ endpoints: username: heat password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /heat diff --git a/helm-configs/horizon/horizon-helm-overrides.yaml b/helm-configs/horizon/horizon-helm-overrides.yaml index 298f8238..4563074f 100644 --- a/helm-configs/horizon/horizon-helm-overrides.yaml +++ b/helm-configs/horizon/horizon-helm-overrides.yaml @@ -7242,7 +7242,7 @@ endpoints: username: horizon password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera 
host_fqdn_override: default: null path: /horizon diff --git a/helm-configs/keystone/keystone-helm-overrides.yaml b/helm-configs/keystone/keystone-helm-overrides.yaml index 09667ed0..cb6f0481 100644 --- a/helm-configs/keystone/keystone-helm-overrides.yaml +++ b/helm-configs/keystone/keystone-helm-overrides.yaml @@ -972,7 +972,7 @@ endpoints: username: keystone password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /keystone diff --git a/helm-configs/neutron/neutron-helm-overrides.yaml b/helm-configs/neutron/neutron-helm-overrides.yaml index ac8f036f..2e9dbdd0 100644 --- a/helm-configs/neutron/neutron-helm-overrides.yaml +++ b/helm-configs/neutron/neutron-helm-overrides.yaml @@ -2199,7 +2199,7 @@ endpoints: username: neutron password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /neutron diff --git a/helm-configs/nova/nova-helm-overrides.yaml b/helm-configs/nova/nova-helm-overrides.yaml index 450a8a9d..75e07d39 100644 --- a/helm-configs/nova/nova-helm-overrides.yaml +++ b/helm-configs/nova/nova-helm-overrides.yaml @@ -1640,7 +1640,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /nova @@ -1657,7 +1657,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /nova_api @@ -1674,7 +1674,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /nova_cell0 diff --git a/helm-configs/octavia/octavia-helm-overrides.yaml b/helm-configs/octavia/octavia-helm-overrides.yaml index 1a30a9e2..2865d4c9 100644 --- a/helm-configs/octavia/octavia-helm-overrides.yaml +++ b/helm-configs/octavia/octavia-helm-overrides.yaml @@ -466,7 +466,7 @@ endpoints: username: octavia password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /octavia diff --git a/helm-configs/placement/placement-helm-overrides.yaml b/helm-configs/placement/placement-helm-overrides.yaml index 9d85dd6e..f6a2cc8c 100644 --- a/helm-configs/placement/placement-helm-overrides.yaml +++ b/helm-configs/placement/placement-helm-overrides.yaml @@ -206,7 +206,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /placement diff --git a/kustomize/cinder/base/cinder-mariadb-database.yaml b/kustomize/cinder/base/cinder-mariadb-database.yaml index ae676839..94076c21 100644 --- a/kustomize/cinder/base/cinder-mariadb-database.yaml +++ b/kustomize/cinder/base/cinder-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: cinder namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: cinder namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: cinder-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git 
a/kustomize/cinder/base/cinder-rabbitmq-queue.yaml b/kustomize/cinder/base/cinder-rabbitmq-queue.yaml index e72ce2ea..b4e3b4bd 100644 --- a/kustomize/cinder/base/cinder-rabbitmq-queue.yaml +++ b/kustomize/cinder/base/cinder-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: cinder namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: cinder-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "cinder" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: cinder-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: cinder-qq # name of the queue vhost: "cinder" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: cinder-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "cinder" # name of a vhost userReference: diff --git a/kustomize/glance/base/glance-mariadb-database.yaml b/kustomize/glance/base/glance-mariadb-database.yaml index ce92a42c..5f3f540b 100644 --- a/kustomize/glance/base/glance-mariadb-database.yaml +++ b/kustomize/glance/base/glance-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: glance namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: glance namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: glance-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/glance/base/glance-rabbitmq-queue.yaml b/kustomize/glance/base/glance-rabbitmq-queue.yaml index 7ced8174..ec1aa1ca 100644 --- a/kustomize/glance/base/glance-rabbitmq-queue.yaml +++ b/kustomize/glance/base/glance-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: glance namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: glance-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "glance" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: glance-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: glance-qq # name of the queue vhost: "glance" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: glance-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "glance" # name of a vhost userReference: diff --git a/kustomize/grafana/base/grafana-database.yaml b/kustomize/grafana/base/grafana-database.yaml index 1accfcbf..f8e57070 100644 --- a/kustomize/grafana/base/grafana-database.yaml +++ b/kustomize/grafana/base/grafana-database.yaml @@ -1,8 +1,12 @@ -apiVersion: mariadb.mmontes.io/v1alpha1 
+apiVersion: k8s.mariadb.com/v1alpha1 kind: Database metadata: name: grafana namespace: openstack + annotations: + helm.sh/resource-policy: keep + labels: + app.kubernetes.io/managed-by: Helm spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -13,11 +17,13 @@ spec: requeueInterval: 30s retryInterval: 5s --- -apiVersion: mariadb.mmontes.io/v1alpha1 +apiVersion: k8s.mariadb.com/v1alpha1 kind: User metadata: name: grafana namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -32,11 +38,13 @@ spec: requeueInterval: 30s retryInterval: 5s --- -apiVersion: mariadb.mmontes.io/v1alpha1 +apiVersion: k8s.mariadb.com/v1alpha1 kind: Grant metadata: name: grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/heat/base/heat-mariadb-database.yaml b/kustomize/heat/base/heat-mariadb-database.yaml index dbced123..76bd3eac 100644 --- a/kustomize/heat/base/heat-mariadb-database.yaml +++ b/kustomize/heat/base/heat-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: heat namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: heat namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: heat-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/horizon/base/horizon-mariadb-database.yaml b/kustomize/horizon/base/horizon-mariadb-database.yaml index 7bc91e33..2daf6706 100644 --- a/kustomize/horizon/base/horizon-mariadb-database.yaml +++ b/kustomize/horizon/base/horizon-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: horizon namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: horizon namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: horizon-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/keystone/base/keystone-mariadb-database.yaml b/kustomize/keystone/base/keystone-mariadb-database.yaml index c9fcd34f..94865021 100644 --- a/kustomize/keystone/base/keystone-mariadb-database.yaml +++ b/kustomize/keystone/base/keystone-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: keystone namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: keystone namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: keystone-grant namespace: openstack + 
annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/keystone/base/keystone-rabbitmq-queue.yaml b/kustomize/keystone/base/keystone-rabbitmq-queue.yaml index e55e4192..7972f194 100644 --- a/kustomize/keystone/base/keystone-rabbitmq-queue.yaml +++ b/kustomize/keystone/base/keystone-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: keystone namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: keystone-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "keystone" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: keystone-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: keystone-qq # name of the queue vhost: "keystone" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: keystone-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "keystone" # name of a vhost userReference: diff --git a/kustomize/mariadb-cluster/aio/kustomization.yaml b/kustomize/mariadb-cluster/aio/kustomization.yaml index 5b0a66d9..7e5e7e83 100644 --- a/kustomize/mariadb-cluster/aio/kustomization.yaml +++ b/kustomize/mariadb-cluster/aio/kustomization.yaml @@ -12,3 +12,10 @@ patches: - op: replace path: /spec/galera/enabled value: false + - target: + kind: MaxScale + name: maxscale-galera + patch: |- + - op: replace + path: /spec/replicas + value: 1 diff --git a/kustomize/mariadb-cluster/base/kustomization.yaml b/kustomize/mariadb-cluster/base/kustomization.yaml index f297b151..c074ce60 100644 --- a/kustomize/mariadb-cluster/base/kustomization.yaml +++ b/kustomize/mariadb-cluster/base/kustomization.yaml @@ -1,4 +1,5 @@ resources: - mariadb-configmap.yaml + - mariadb-maxscale.yaml - mariadb-galera.yaml - mariadb-backup.yaml diff --git a/kustomize/mariadb-cluster/base/mariadb-galera.yaml b/kustomize/mariadb-cluster/base/mariadb-galera.yaml index 6fc77a9e..aa737aa3 100644 --- a/kustomize/mariadb-cluster/base/mariadb-galera.yaml +++ b/kustomize/mariadb-cluster/base/mariadb-galera.yaml @@ -28,21 +28,8 @@ spec: runAsUser: 0 # point to an existing MaxScale instance. Doing this will delegate tasks such as primary failover to MaxScale. - # maxScaleRef: - # name: maxscale - - # provision a MaxScale instance and set 'spec.maxScaleRef' automatically. 
- maxScale: - enabled: false - - kubernetesService: - type: LoadBalancer - annotations: - metallb.universe.tf/address-pool: primary - - connection: - secretName: mxs-galera-conn - port: 3306 + maxScaleRef: + name: maxscale-galera galera: enabled: true @@ -132,8 +119,6 @@ spec: resources: requests: memory: 256Mi - limits: - memory: 16Gi metrics: enabled: true diff --git a/kustomize/mariadb-cluster/base/mariadb-maxscale.yaml b/kustomize/mariadb-cluster/base/mariadb-maxscale.yaml new file mode 100644 index 00000000..350d8ca0 --- /dev/null +++ b/kustomize/mariadb-cluster/base/mariadb-maxscale.yaml @@ -0,0 +1,132 @@ +apiVersion: k8s.mariadb.com/v1alpha1 +kind: MaxScale +metadata: + name: maxscale-galera +spec: + replicas: 3 + + mariaDbRef: + name: mariadb-galera + namespace: openstack + + services: + - name: rw-router + router: readwritesplit + params: + transaction_replay: "true" + transaction_replay_attempts: "10" + transaction_replay_timeout: "5s" + max_slave_connections: "255" + max_replication_lag: "3s" + master_accept_reads: "true" + listener: + name: rw-listener + port: 3306 + protocol: MariaDBProtocol + params: + connection_metadata: "tx_isolation=auto" + suspend: false + suspend: false + - name: rconn-master-router + router: readconnroute + params: + router_options: "master" + max_replication_lag: "3s" + master_accept_reads: "true" + listener: + port: 3307 + - name: rconn-slave-router + router: readconnroute + params: + router_options: "slave" + max_replication_lag: "3s" + listener: + port: 3308 + + monitor: + name: mariadb-monitor + module: galeramon + interval: 2s + cooperativeMonitoring: majority_of_all + params: + disable_master_failback: "false" + available_when_donor: "false" + disable_master_role_setting: "false" + suspend: false + + admin: + port: 8989 + guiEnabled: true + + config: + params: + log_info: "true" + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + accessModes: + - ReadWriteOnce + storageClassName: general + sync: + database: mysql + interval: 5s + timeout: 10s + + auth: + generate: true + adminUsername: mariadb-operator + adminPasswordSecretKeyRef: + name: maxscale + key: password + deleteDefaultAdmin: true + clientUsername: maxscale-galera-client + clientPasswordSecretKeyRef: + name: maxscale + key: password + clientMaxConnections: 1024 + serverUsername: maxscale-galera-server + serverPasswordSecretKeyRef: + name: maxscale + key: password + serverMaxConnections: 1024 + monitorUsername: maxscale-galera-monitor + monitorPasswordSecretKeyRef: + name: maxscale + key: password + monitorMaxConnections: 128 + syncUsername: maxscale-galera-sync + syncPasswordSecretKeyRef: + name: maxscale + key: password + syncMaxConnections: 128 + + securityContext: + allowPrivilegeEscalation: false + + updateStrategy: + type: RollingUpdate + + kubernetesService: + type: LoadBalancer + annotations: + metallb.universe.tf/address-pool: primary + + connection: + secretName: mxs-galera-conn + port: 3306 + + resources: + requests: + memory: 128Mi + + affinity: + enableAntiAffinity: true + + tolerations: + - key: "k8s.mariadb.com/ha" + operator: "Exists" + effect: "NoSchedule" + + podDisruptionBudget: + maxUnavailable: 33% diff --git a/kustomize/neutron/base/neutron-mariadb-database.yaml b/kustomize/neutron/base/neutron-mariadb-database.yaml index 7ae9d6d2..36563b22 100644 --- a/kustomize/neutron/base/neutron-mariadb-database.yaml +++ b/kustomize/neutron/base/neutron-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: neutron namespace: openstack + 
annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: neutron namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: neutron-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/neutron/base/neutron-rabbitmq-queue.yaml b/kustomize/neutron/base/neutron-rabbitmq-queue.yaml index 15cb236d..b9617413 100644 --- a/kustomize/neutron/base/neutron-rabbitmq-queue.yaml +++ b/kustomize/neutron/base/neutron-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: neutron namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: neutron-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "neutron" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: neutron-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: neutron-qq # name of the queue vhost: "neutron" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: neutron-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "neutron" # name of a vhost userReference: diff --git a/kustomize/nova/base/nova-mariadb-database.yaml b/kustomize/nova/base/nova-mariadb-database.yaml index 8ee8c90b..150af95d 100644 --- a/kustomize/nova/base/nova-mariadb-database.yaml +++ b/kustomize/nova/base/nova-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: nova namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: Database metadata: name: nova-api namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -33,6 +37,8 @@ kind: Database metadata: name: nova-cell0 namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -48,6 +54,8 @@ kind: User metadata: name: nova namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -66,6 +74,8 @@ kind: Grant metadata: name: nova-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera @@ -83,6 +93,8 @@ kind: Grant metadata: name: nova-api-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera @@ -100,6 +112,8 @@ kind: Grant metadata: name: nova-cell0-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/nova/base/nova-rabbitmq-queue.yaml b/kustomize/nova/base/nova-rabbitmq-queue.yaml index 
61077e77..7010af5d 100644 --- a/kustomize/nova/base/nova-rabbitmq-queue.yaml +++ b/kustomize/nova/base/nova-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: nova namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: nova-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "nova" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: nova-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: nova-qq # name of the queue vhost: "nova" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: nova-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "nova" # name of a vhost userReference: diff --git a/kustomize/octavia/base/octavia-agent.yaml b/kustomize/octavia/base/octavia-agent.yaml index 60fc81d8..58fb12ad 100644 --- a/kustomize/octavia/base/octavia-agent.yaml +++ b/kustomize/octavia/base/octavia-agent.yaml @@ -81,7 +81,7 @@ spec: - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/ - name: DEPENDENCY_SERVICE - value: "openstack:mariadb-galera-primary,openstack:keystone-api,openstack:rabbitmq-nodes,openstack:memcached,openstack:neutron-server" + value: "openstack:maxscale-galera,openstack:keystone-api,openstack:rabbitmq-nodes,openstack:memcached,openstack:neutron-server" - name: DEPENDENCY_JOBS value: "octavia-db-sync,octavia-ks-user,octavia-ks-endpoints" - name: DEPENDENCY_DAEMONSET @@ -187,4 +187,4 @@ spec: secretName: octavia-etc defaultMode: 0444 - emptyDir: {} - name: pod-run-octavia \ No newline at end of file + name: pod-run-octavia diff --git a/kustomize/octavia/base/octavia-mariadb-database.yaml b/kustomize/octavia/base/octavia-mariadb-database.yaml index d253d793..66deeea2 100644 --- a/kustomize/octavia/base/octavia-mariadb-database.yaml +++ b/kustomize/octavia/base/octavia-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: octavia namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: octavia namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: octavia-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/octavia/base/octavia-rabbitmq-queue.yaml b/kustomize/octavia/base/octavia-rabbitmq-queue.yaml index a5af8b5b..783061f3 100644 --- a/kustomize/octavia/base/octavia-rabbitmq-queue.yaml +++ b/kustomize/octavia/base/octavia-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: octavia namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: octavia-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "octavia" # vhost name; required and cannot be updated defaultQueueType: 
quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: octavia-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: octavia-qq # name of the queue vhost: "octavia" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: octavia-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "octavia" # name of a vhost userReference: diff --git a/kustomize/placement/base/placement-mariadb-database.yaml b/kustomize/placement/base/placement-mariadb-database.yaml index a89862ad..ff6e1c31 100644 --- a/kustomize/placement/base/placement-mariadb-database.yaml +++ b/kustomize/placement/base/placement-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: placement namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: placement namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: placement-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera
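After the updated kustomize bases are applied, a quick sanity check is to confirm that the operator has reconciled the new resources. This is a minimal sketch that assumes the standard lowercase plurals for the `k8s.mariadb.com` custom resources used throughout this patch:

``` shell
# Watch the MaxScale instance come up alongside the Galera cluster
kubectl --namespace openstack get maxscales.k8s.mariadb.com -w

# Confirm the per-service Database, User, and Grant resources have reconciled
kubectl --namespace openstack get databases.k8s.mariadb.com,users.k8s.mariadb.com,grants.k8s.mariadb.com
```

The fully qualified resource names avoid clashing with the RabbitMQ topology objects of the same kind, such as the `User` resources defined in the *-rabbitmq-queue.yaml files above.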