diff --git a/.github/workflows/kustomize-prometheus-postgres-exporter.yaml b/.github/workflows/kustomize-prometheus-postgres-exporter.yaml new file mode 100644 index 00000000..bb9d3025 --- /dev/null +++ b/.github/workflows/kustomize-prometheus-postgres-exporter.yaml @@ -0,0 +1,33 @@ +name: Kustomize GitHub Actions for Prometheus PostgresSQL exporter + +on: + pull_request: + paths: + - kustomize/prometheus-postgres-exporter/** + - .github/workflows/kustomize-prometheus-postgres-exporter.yaml +jobs: + kustomize: + name: Kustomize + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Kustomize Install + working-directory: /usr/local/bin/ + run: | + if [ ! -f /usr/local/bin/kustomize ]; then + curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | sudo bash + fi + - name: Run Kustomize Build + run: | + kustomize build kustomize/prometheus-postgres-exporter/ --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml + - name: Return Kustomize Build + uses: actions/upload-artifact@v2 + with: + name: kustomize-prometheus-postgres-exporter-artifact + path: /tmp/rendered.yaml diff --git a/.github/workflows/kustomize-sealed-secrets.yaml b/.github/workflows/kustomize-sealed-secrets.yaml new file mode 100644 index 00000000..1f5e4157 --- /dev/null +++ b/.github/workflows/kustomize-sealed-secrets.yaml @@ -0,0 +1,37 @@ +name: Kustomize GitHub Actions for sealed-secrets + +on: + pull_request: + paths: + - kustomize/sealed-secrets/** + - .github/workflows/kustomize-sealed-secrets.yaml +jobs: + kustomize: + strategy: + matrix: + overlays: + - base + name: Kustomize + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Kustomize Install + working-directory: /usr/local/bin/ + run: | + if [ ! 
-f /usr/local/bin/kustomize ]; then + curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | sudo bash + fi + - name: Run Kustomize Build + run: | + kustomize build kustomize/sealed-secrets/${{ matrix.overlays }} --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml + - name: Return Kustomize Build + uses: actions/upload-artifact@v2 + with: + name: kustomize-sealed-secrets-artifact-${{ matrix.overlays }} + path: /tmp/rendered.yaml diff --git a/.gitignore b/.gitignore index eeb46464..93ab9ae2 100644 --- a/.gitignore +++ b/.gitignore @@ -103,7 +103,7 @@ target/ .env # virtualenv -venv/ +*venv/ ENV/ # molecule diff --git a/.gitmodules b/.gitmodules index 0bf31b97..64a92954 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,3 +13,6 @@ [submodule "submodules/openstack-exporter"] path = submodules/openstack-exporter url = https://github.com/openstack-exporter/helm-charts +[submodule "submodules/nginx-gateway-fabric"] + path = submodules/nginx-gateway-fabric + url = https://github.com/nginxinc/nginx-gateway-fabric.git diff --git a/dev-requirements.txt b/dev-requirements.txt index e024e590..53f502b7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,7 +1,7 @@ ansible-compat==4.1.11 ansible-lint==24.2.0 attrs==23.2.0 -black==24.1.1 +black==24.3.0 bracex==2.4 click==8.1.7 filelock==3.13.1 diff --git a/docs/examples/alertmanager-encore.md b/docs/alertmanager-encore.md similarity index 98% rename from docs/examples/alertmanager-encore.md rename to docs/alertmanager-encore.md index 282b12ac..572963f0 100644 --- a/docs/examples/alertmanager-encore.md +++ b/docs/alertmanager-encore.md @@ -1,9 +1,10 @@ +# Encore Alerts The following example describes configuration options to send alerts via alertmanager to Rackspace encore, the `Encore UUID` is derived by account where the secret `SECRET KEY` is used per application submitting webhooks: -```yaml +``` yaml global: resolve_timeout: 5m receivers: diff --git a/docs/examples/alertmanager-slack.md b/docs/alertmanager-slack.md similarity index 98% rename from docs/examples/alertmanager-slack.md rename to docs/alertmanager-slack.md index 7a7ef539..eafc3147 100644 --- a/docs/examples/alertmanager-slack.md +++ b/docs/alertmanager-slack.md @@ -1,8 +1,9 @@ +# Slack Alerts The following example describes configuration options to send alerts via alertmanager to slack using a slack hook. -```yaml +``` yaml alertmanager: alertmanagerSpec: image: diff --git a/docs/assets/images/flexingress.png b/docs/assets/images/flexingress.png new file mode 100644 index 00000000..68fa6baf Binary files /dev/null and b/docs/assets/images/flexingress.png differ diff --git a/docs/genestack-getting-started.md b/docs/genestack-getting-started.md index 70f3ea7b..5b2e80fb 100644 --- a/docs/genestack-getting-started.md +++ b/docs/genestack-getting-started.md @@ -2,11 +2,11 @@ # What is Genestack? -Genestack is a complete operations and deployment ecosystem for Kubernetes and OpenStack. The purpose is of +Genestack is a complete operations and deployment ecosystem for Kubernetes and OpenStack. The purpose of this project is to allow hobbyists, operators, and cloud service providers the ability to build, scale, and leverage Open-Infrastructure in new and exciting ways. -Genestack’s inner workings are a blend dark magic — crafted with [Kustomize](https://kustomize.io) and +Genestack’s inner workings are a blend of dark magic — crafted with [Kustomize](https://kustomize.io) and [Helm](https://helm.sh). 
It’s like cooking with cloud. Want to spice things up? Tweak the `kustomization.yaml` files or add those extra 'toppings' using Helm's style overrides. However, the platform is ready to go with batteries included. @@ -18,7 +18,7 @@ to manage cloud infrastructure in the way you need it. ## Getting Started -Before you can do anything we need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location. +Before you can do anything, you need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location. !!! info diff --git a/docs/genestack-upgrade.md b/docs/genestack-upgrade.md index 8832c897..f1c62446 100644 --- a/docs/genestack-upgrade.md +++ b/docs/genestack-upgrade.md @@ -31,3 +31,28 @@ An update is generally the same as an install. Many of the Genestack application * When needing to run an upgrade for the infrastructure operators, consult the operator documentation to validate the steps required. * When needing to run an upgrade for the OpenStack components, simply re-run the `helm` charts as documented in the Genestack installation process. + +## Kubernetes Upgrade Notes + +Over the course of normal operations it's likely that a CRD will change versions, names, or something else. In these cases, should an operator or helm chart not gracefully handle an full upgrade, the `kubectl convert` plugin can be used to make some adjustments where needed. + +!!! example "Converting mmontes CRDs to mariadb official ones" + + ``` shell + kubectl get --namespace openstack crd.namespace -o yaml value > /tmp/value.crd.namespace.yaml + kubectl convert -f /tmp/value.crd.namespace.yaml --output-version new-namespace/VERSION + ``` + +## Kubernetes Finalizers + +When processing an upgrade there may come a time when a finalizer is stuck, typically something that happens when an operator or an api reference is changed. If this happens one way to resolve the issue is to patch the Finalizers. + +!!! warning + + Patching Finalizers could leave orphaned resources. Before patching a finalizer be sure your "ready." + +!!! example "Patching Finalizers" + + ``` shell + kubectl patch $@ --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' + ``` diff --git a/docs/grafana.md b/docs/grafana.md new file mode 100644 index 00000000..2e140acf --- /dev/null +++ b/docs/grafana.md @@ -0,0 +1,153 @@ +# Grafana + +--- + +!!! note + This deployment makes a few assumption: + + * assumes you are using OAuth using Azure + * assumes you are using tls/ssl + * assumes you are using ingress + + If this does not apply to your deployment adjust the overrides.yaml file and skip over any unneeded sections here + +## Create secret client file + +In order to avoid putting sensative information on the cli, it is recommended to create and use a secret file instead. + +You can base64 encode your `client_id` and `client_secret` by using the echo and base64 command: + +``` shell +echo -n "YOUR CLIENT ID OR SECRET" | base64 +``` + +This example file is located at `/opt/genestack/kustomize/grafana/base` +example secret file: + +``` yaml +apiversion: v1 +data: + client_id: base64_encoded_client_id + client_secret: base64_encoded_client_secret +kind: secret +metadata: + name: azure-client + namespace: grafana +type: opaque +``` + +--- + +## Create your ssl files + +If you are configuring grafana to use tls/ssl, you should create a file for your certificate and a file for your key. 
After the deployment, these files can be deleted if desired since the cert and key will now be in a Kubernetes secret. + +Your cert and key files should look something like the following (cert and key example taken from [VMware Docs](https://docs.vmware.com/en/VMware-NSX-Data-Center-for-vSphere/6.4/com.vmware.nsx.admin.doc/GUID-BBC4804F-AC54-4DD2-BF6B-ECD2F60083F6.html "VMware Docs")). + +These example files are located in `/opt/genestack/kustomize/grafana/base` + +??? example + + === "Cert file (example-cert.pem)" + ``` + -----BEGIN CERTIFICATE----- + MIID0DCCARIGAWIBAGIBATANBGKQHKIG9W0BAQUFADB/MQSWCQYDVQQGEWJGUJET + MBEGA1UECAWKU29TZS1TDGF0ZTEOMAWGA1UEBWWFUGFYAXMXDTALBGNVBAOMBERP + BWKXDTALBGNVBASMBE5TQLUXEDAOBGNVBAMMB0RPBWKGQ0EXGZAZBGKQHKIG9W0B + CQEWDGRPBWLAZGLTAS5MCJAEFW0XNDAXMJGYMDM2NTVAFW0YNDAXMJYYMDM2NTVA + MFSXCZAJBGNVBAYTAKZSMRMWEQYDVQQIDAPTB21LLVN0YXRLMSEWHWYDVQQKDBHJ + BNRLCM5LDCBXAWRNAXRZIFB0ESBMDGQXFDASBGNVBAMMC3D3DY5KAW1PLMZYMIIB + IJANBGKQHKIG9W0BAQEFAAOCAQ8AMIIBCGKCAQEAVPNAPKLIKDVX98KW68LZ8PGA + RRCYERSNGQPJPIFMVJJE8LUCOXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWX + SXITRW99HBFAL1MDQYWCUKOEB9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P + 1NCVW+6B/AAN9L1G2PQXGRDYC/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYB + AKJQETWWV6DFK/GRDOSED/6BW+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAU + ZKCHSRYC/WHVURX6O85D6QPZYWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWID + AQABO3SWETAJBGNVHRMEAJAAMCWGCWCGSAGG+EIBDQQFFH1PCGVUU1NMIEDLBMVY + YXRLZCBDZXJ0AWZPY2F0ZTADBGNVHQ4EFGQU+TUGFTYN+CXE1WXUQEA7X+YS3BGW + HWYDVR0JBBGWFOAUHMWQKBBRGP87HXFVWGPNLGGVR64WDQYJKOZIHVCNAQEFBQAD + GGEBAIEEMQQHEZEXZ4CKHE5UM9VCKZKJ5IV9TFS/A9CCQUEPZPLT7YVMEVBFNOC0 + +1ZYR4TXGI4+5MHGZHYCIVVHO4HKQYM+J+O5MWQINF1QOAHUO7CLD3WNA1SKCVUV + VEPIXC/1AHZRG+DPEEHT0MDFFOW13YDUC2FH6AQEDCEL4AV5PXQ2EYR8HR4ZKBC1 + FBTUQUSVA8NWSIYZQ16FYGVE+ANF6VXVUIZYVWDRPRV/KFVLNA3ZPNLMMXU98MVH + PXY3PKB8++6U4Y3VDK2NI2WYYLILS8YQBM4327IKMKDC2TIMS8U60CT47MKU7ADY + CBTV5RDKRLAYWM5YQLTIGLVCV7O= + -----END CERTIFICATE----- + ``` + + === "Key file (example-key.pem)" + ``` + -----BEGIN RSA PRIVATE KEY----- + MIIEOWIBAAKCAQEAVPNAPKLIKDVX98KW68LZ8PGARRCYERSNGQPJPIFMVJJE8LUC + OXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWXSXITRW99HBFAL1MDQYWCUKOE + B9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P1NCVW+6B/AAN9L1G2PQXGRDY + C/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYBAKJQETWWV6DFK/GRDOSED/6B + W+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAUZKCHSRYC/WHVURX6O85D6QPZ + YWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWIDAQABAOIBAFML8CD9A5PMQLW3 + F9BTTQZ1SRL4FVP7CMHSXHVJSJEHWHHCKEE0OBKWTRSGKTSM1XLU5W8IITNHN0+1 + INR+78EB+RRGNGDAXH8DIODKEY+8/CEE8TFI3JYUTKDRLXMBWIKSOUVVIUMOQ3FX + OGQYWQ0Z2L/PVCWY/Y82FFQ3YSC5GAJSBBYSCRG14BQO44ULRELE4SDWS5HCJKYB + EI2B8COMUCQZSOTXG9NILN/JE2BO/I2HGSAWIBGCODBMS8K6TVSSRZMR3KJ5O6J+ + 77LGWKH37BRVGBVYVBQ6NWPL0XLG7DUV+7LWEO5QQAPY6AXB/ZBCKQLQU6/EJOVE + YDG5JQECGYEA9KKFTZD/WEVAREA0DZFEJRU8VLNWOAGL7CJAODXQXOS4MCR5MPDT + KBWGFKLFFH/AYUNPBLK6BCJP1XK67B13ETUA3I9Q5T1WUZEOBIKKBLFM9DDQJT43 + UKZWJXBKFGSVFRYPTGZST719MZVCPCT2CZPJEGN3HLPT6FYW3EORNOECGYEAXIOU + JWXCOMUGAB7+OW2TR0PGEZBVVLEGDKAJ6TC/HOKM1A8R2U4HLTEJJCRLLTFW++4I + DDHE2DLER4Q7O58SFLPHWGPMLDEZN7WRLGR7VYFUV7VMAHJGUC3GV9AGNHWDLA2Q + GBG9/R9OVFL0DC7CGJGLEUTITCYC31BGT3YHV0MCGYEA4K3DG4L+RN4PXDPHVK9I + PA1JXAJHEIFEHNAW1D3VWKBSKVJMGVF+9U5VEV+OWRHN1QZPZV4SURI6M/8LK8RA + GR4UNM4AQK4K/QKY4G05LKRIK9EV2CGQSLQDRA7CJQ+JN3NB50QG6HFNFPAFN+J7 + 7JUWLN08WFYV4ATPDD+9XQECGYBXIZKZFL+9IQKFOCONVWAZGO+DQ1N0L3J4ITIK + W56CKWXYJ88D4QB4EUU3YJ4UB4S9MIAW/ELEWKZIBWPUPFAN0DB7I6H3ZMP5ZL8Q + QS3NQCB9DULMU2/TU641ERUKAMIOKA1G9SNDKAZUWO+O6FDKIB1RGOBK9XNN8R4R + 
PSV+AQKBGB+CICEXR30VYCV5BNZN9EFLIXNKAEMJURYCXCRQNVRNUIUBVAO8+JAE + CDLYGS5RTGOLZIB0IVERQWSP3EI1ACGULTS0VQ9GFLQGAN1SAMS40C9KVNS1MLDU + LHIHYPJ8USCVT5SNWO2N+M+6ANH5TPWDQNEK6ZILH4TRBUZAIHGB + -----END RSA PRIVATE KEY----- + ``` + +--- + +## Update datasources.yaml + +The datasource.yaml file is located at `/opt/genestack/kustomize/grafana/base` + +If you have specific datasources that should be populated when grafana deploys, update the datasource.yaml to use your values. The example below shows one way to configure prometheus and loki datasources. + +example datasources.yaml file: + +``` yaml +datasources: + datasources.yaml: + apiversion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + url: http://kube-prometheus-stack-prometheus.prometheus.svc.cluster.local:9090 + isdefault: true + - name: loki + type: loki + access: proxy + url: http://loki-gateway.{{ $.Release.Namespace }}.svc.cluster.local:80 + editable: false +``` + +--- + +## Update grafana-values.yaml + +The grafana-values.yaml file is located at `/opt/genestack/kustomize/grafana/base` + +You must edit this file to include your specific url and azure tenant id + +--- + +## Create the tls secret and install + +``` shell +kubectl -n grafana create secret tls grafana-tls-public --cert=/opt/genestack/kustomize/grafana/base/cert.pem --key=/opt/genestack/kustomize/grafana/base/key.pem + +kubectl kustomize --enable-helm /opt/genestack/kustomize/grafana/base | \ + kubectl -n grafana -f - +``` diff --git a/docs/infrastructure-gateway-api.md b/docs/infrastructure-gateway-api.md new file mode 100644 index 00000000..87147cdc --- /dev/null +++ b/docs/infrastructure-gateway-api.md @@ -0,0 +1,121 @@ +## Gateway API + +Gateway API is L4 and L7 layer routing project in Kubernetes. It represents next generation of k8s Ingress, LB and Service Mesh APIs. For more information on the project see: [Gateway API SIG.](https://gateway-api.sigs.k8s.io/) + +**Move from Ingress to Gateway APIs** +Since Gateway APIs are successor to Ingress Controllers there needs to be a one time migration from Ingress -> GW API resources. To learn more about it refer to: [Ingress Migration](https://gateway-api.sigs.k8s.io/guides/migrating-from-ingress/#migrating-from-ingress) + + +### Resource Models in Gateway API +There are 3 main resource models in gateway apis: +1. GatewayClass - Mostly managed by a controller. +2. Gateway - An instance of traffic handling infra like a LB. +3. Routes - Defines HTTP-specific rules for mapping traffic from a Gateway listener to a representation of backend network endpoints. + +**k8s Gateway API is NOT the same as API Gateways** +While both sound the same, API Gateway is a more of a general concept that defines a set of resources that exposes capabilities of a backend service but also provide other functionalities like traffic management, rate limiting, authentication and more. It is geared towards commercial API management and monetisation. + +From the gateway api sig: + +!!! note + + Most Gateway API implementations are API Gateways to some extent, but not all API Gateways are Gateway API implementations. + + +### Controller: NGINX Gateway Fabric +[NGINX Gateway Fabric](https://github.com/nginxinc/nginx-gateway-fabric) is an open-source project that provides an implementation of the Gateway API using nginx as the data plane. 
+ +Chart Install: https://github.com/nginxinc/nginx-gateway-fabric/blob/main/deploy/helm-chart/values.yaml + +Create the Namespace +``` +kubectl create ns nginx-gateway +``` + +First Install the Gateway API Resource from Kubernetes +``` +kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml +``` + +Next, Install the NGINX Gateway Fabric controller +``` +cd /opt/genestack/submodules/nginx-gateway-fabric + +helm upgrade --install nginx-gateway-fabric . --namespace=nginx-gateway -f /opt/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml +``` + +Helm install does not automatically upgrade the crds for this resource. To upgrade the crds you will have to manually install them. Follow the process from : [Upgrade CRDs](https://docs.nginx.com/nginx-gateway-fabric/installation/installing-ngf/helm/#upgrade-nginx-gateway-fabric-crds) + +### Example Implementation with Prometheus UI + +In this example we will look at how Prometheus UI is exposed through the gateway. For other services the gateway kustomization file for the service. + +First, create the shared gateway and then the httproute resource for prometheus. +``` +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: flex-gateway +spec: + gatewayClassName: nginx + listeners: + - name: http + port: 80 + protocol: HTTP + hostname: "*.sjc.ohthree.com" +``` + +then + +``` +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: prometheus-gateway-route +spec: + parentRefs: + - name: flex-gateway + sectionName: http + hostnames: + - "prometheus.sjc.ohthree.com" + rules: + - backendRefs: + - name: kube-prometheus-stack-prometheus + port: 9090 +``` +At this point, flex-gateway has a listener pointed to the port 80 matching *.sjc.ohthree.com hostname. The HTTPRoute resource configures routes for this gateway. Here, we match all path and simply pass any request from the matching hostname to kube-prometheus-stack-prometheus backend service. + +### Exposing Flex Services + +We have a requirement to expose a service + + 1. Internally for private consumption (Management and Administrative Services) + 2. Externally to customers (mostly Openstack services) + +![Flex Service Expose External with F5 Loadbalancer](assets/images/flexingress.png) + +For each externally exposed service, example: keystone endpoint, we have a GatewayAPI resource setup to use listeners on services with matching rules based on hostname, for example keystone.sjc.api.rackspacecloud.com. When a request comes in to the f5 vip for this the vip is setup to pass the traffic to the Metallb external vip address. Metallb then forwards the traffic to the appropriate service endpoint for the gateway controller which matches the hostname and passes the traffic onto the right service. The same applies to internal services. Anything that matches ohthree.com hostname can be considered internal and handled accordingly. + +``` +External Traffic -> F5 VIP Address -> MetalLB VIP Address -> Gateway Service + +``` + +This setup can be expended to have multiple MetalLB VIPs with multiple Gateway Services listening on different IP addresses as required by your setup. + +!!! tip + + The metalLB speaker wont advertise the service if : + + 1. There is no active endpoint backing the service + + 2. There are no matching L2 or BGP speaker nodes + + 3. If the service has external Traffic Policy set to local you need to have the running endpoint on the speaker node. 
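As a rough illustration of how an additional gateway can be given its own MetalLB VIP, the LoadBalancer Service fronting that gateway's data plane can request a dedicated address pool through an annotation. The Service name, selector, and pool name below are placeholders for illustration only and are not values shipped with this repository:

``` yaml
apiVersion: v1
kind: Service
metadata:
  # Hypothetical Service for a second (internal-only) gateway data plane.
  name: internal-gateway
  namespace: nginx-gateway
  annotations:
    # Ask MetalLB to allocate the VIP from a dedicated pool; the pool name is a placeholder.
    metallb.universe.tf/address-pool: gateway-internal-pool
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/name: internal-gateway
  ports:
    - name: http
      port: 80
      targetPort: 80
```

With a separate pool and Service per gateway, internal and external hostnames can be split across distinct VIPs while the HTTPRoute definitions remain unchanged.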
+ + +### Cross Namespace Routing + +Gateway API has support for multi-ns and cross namespace routing. Routes can be deployed into different Namespaces and Routes can attach to Gateways across Namespace boundaries. This allows user access control to be applied differently across Namespaces for Routes and Gateways, effectively segmenting access and control to different parts of the cluster-wide routing configuration. + +See: https://gateway-api.sigs.k8s.io/guides/multiple-ns/ for more information on cross namespace routing. diff --git a/docs/infrastructure-mariadb-connect.md b/docs/infrastructure-mariadb-connect.md index 76f2c9e8..7f00c951 100644 --- a/docs/infrastructure-mariadb-connect.md +++ b/docs/infrastructure-mariadb-connect.md @@ -3,9 +3,9 @@ Sometimes an operator may need to connect to the database to troubleshoot things or otherwise make modifications to the databases in place. The following command can be used to connect to the database from a node within the cluster. ``` shell -mysql -h $(kubectl -n openstack get service mariadb-galera-primary -o jsonpath='{.spec.clusterIP}') \ - -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \ - -u root +mysql -h $(kubectl -n openstack get service maxscale-galera -o jsonpath='{.spec.clusterIP}') \ + -p$(kubectl --namespace openstack get secret maxscale -o jsonpath='{.data.password}' | base64 -d) \ + -u maxscale-galera-client ``` !!! info diff --git a/docs/infrastructure-mariadb.md b/docs/infrastructure-mariadb.md index 9b637605..3fbf1cb3 100644 --- a/docs/infrastructure-mariadb.md +++ b/docs/infrastructure-mariadb.md @@ -91,3 +91,19 @@ kubectl --namespace openstack apply -k /opt/genestack/kustomize/mariadb-cluster/ ``` shell kubectl --namespace openstack get mariadbs -w ``` + +## MaxScale + +Within the deployment the OpenStack services use MaxScale for loadlancing and greater reliability. While the MaxScale ecosystem is a good one, there are some limitations that you should be aware of. It is recommended that you review the [MaxScale reference documentation](https://mariadb.com/kb/en/mariadb-maxscale-2302-limitations-and-known-issues-within-mariadb-maxscale) for more about all of the known limitations and potential workarounds available. + +``` mermaid +flowchart TD + A[Connection] ---B{MaxScale} + B ---|ro| C[ES-0] + B ---|rw| D[ES-1] ---|sync| E & C + B ---|ro| E[ES-2] +``` + +### MaxScale GUI + +The MaxScale deployment has access to a built in GUI that can be exposed for further debuging and visibility into the performance of the MariDB backend. For more information on accessing the GUI please refer to the MaxScale documentation that can be found [here](https://mariadb.com/resources/blog/getting-started-with-the-mariadb-maxscale-gui). diff --git a/docs/infrastructure-metallb.md b/docs/infrastructure-metallb.md index 944bd764..be8cee67 100644 --- a/docs/infrastructure-metallb.md +++ b/docs/infrastructure-metallb.md @@ -6,7 +6,7 @@ need to be customized to meet the needs of your environment. 
## Example LB manifest -```yaml +``` yaml metadata: name: openstack-external namespace: metallb-system diff --git a/docs/infrastructure-ovn-db-backup.md b/docs/infrastructure-ovn-db-backup.md index 248d4391..86866755 100644 --- a/docs/infrastructure-ovn-db-backup.md +++ b/docs/infrastructure-ovn-db-backup.md @@ -34,16 +34,18 @@ The directions in the _Kube-OVN_ documentation use `docker run` to get a working The _Kube-OVN_ documentation directs you to pick the node running the `ovn-central` pod associated with the first IP of the `NODE_IPS` environment variable. You should find the `NODE_IPS` environment variable defined on an `ovn-central` pod or the `ovn-central` _Deployment_. Assuming you can run the `kubectl` commands, the following example gets the node IPs off of one of the the deployment: -``` -$ kubectl get deployment -n kube-system ovn-central -o yaml | grep -A1 'name: NODE_IPS' +``` shell +kubectl get deployment -n kube-system ovn-central -o yaml | grep -A1 'name: NODE_IPS' + - name: NODE_IPS value: 10.130.140.246,10.130.140.250,10.130.140.252 ``` Then find the _k8s_ node with the first IP. You can see your _k8s_ nodes and their IPs with the command `kubectl get node -o wide`: -``` -$ kubectl get node -o wide | grep 10.130.140.246 +``` shell +kubectl get node -o wide | grep 10.130.140.246 + k8s-controller01 Ready control-plane 3d17h v1.28.6 10.130.140.246 Ubuntu 22.04.3 LTS 6.5.0-17-generic containerd://1.7.11 root@k8s-controller01:~# ``` diff --git a/docs/infrastructure-postgresql.md b/docs/infrastructure-postgresql.md index 1ec86d22..23536cf9 100644 --- a/docs/infrastructure-postgresql.md +++ b/docs/infrastructure-postgresql.md @@ -2,7 +2,7 @@ ## Create Secrets -```shell +``` shell kubectl --namespace openstack create secret generic postgresql-identity-admin \ --type Opaque \ --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" @@ -23,7 +23,7 @@ kubectl --namespace openstack create secret generic postgresql-db-audit \ Consider the PVC size you will need for the environment you're deploying in. Make adjustments as needed near `storage.[pvc|archive_pvc].size` and `volume.backup.size` to your helm overrides. -```shell +``` shell cd /opt/genestack/submodules/openstack-helm-infra helm upgrade --install postgresql ./postgresql \ --namespace=openstack \ diff --git a/docs/k8s-config.md b/docs/k8s-config.md index 0f3cb870..f65c6617 100644 --- a/docs/k8s-config.md +++ b/docs/k8s-config.md @@ -8,8 +8,16 @@ Install the `kubectl` tool. ``` shell curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -sudo mv kubectl /usr/local/bin/ -sudo chmod +x /usr/local/bin/kubectl +sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +``` + +### Install the `convert` plugin + +The convert plugin can be used to assist with upgrades. 
+ +``` shell +curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert" +sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert ``` ## Retrieve the kube config diff --git a/docs/monitoring-getting-started.md b/docs/monitoring-getting-started.md new file mode 100644 index 00000000..182b1ef1 --- /dev/null +++ b/docs/monitoring-getting-started.md @@ -0,0 +1,51 @@ +# Getting started with genestack monitoring + +In order to begin monitoring your genestack deployment we first need to deploy the core prometheus components + +## Install the Prometheus stack + +Install [Prometheus](prometheus.md) which is part of the kube-prometheus-stack and includes: + +* Prometheus and the Prometheus operator to manage the Prometheus cluster deployment +* AlertManager which allows for alerting configurations to be set in order to notify various services like email or PagerDuty for specified alerting thresholds + +The [Prometheus](prometheus.md) kube-prometheus-stack will also deploy a couple core metric exporters as part of the stack, those include: + +* Node Exporter(Hardware metrics) +* Kube State Exporter(Kubernetes cluster metrics) + +## Install Grafana + +We can then deploy our visualization dashboard Grafana + +* [Install Grafana](grafana.md) + +Grafana is used to visualize various metrics provided by the monitoring system as well as alerts and logs, take a look at the [Grafana](https://grafana.com/) documentation for more information + +## Install the metric exporters + +Now let's deploy our exporters! + +* [Mysql Exporter](prometheus-mysql-exporter.md) +* [RabbitMQ Exporter](prometheus-rabbitmq-exporter.md) +* [Postgres Exporter](prometheus-postgres-exporter.md) +* [Memcached Exporter](prometheus-memcached-exporter.md) +* [Openstack Exporter](prometheus-openstack-metrics-exporter.md) + +## Next steps + +### Configure alert manager + +Configure the alert manager to send the specified alerts to slack as an example, see: [Slack Alerts](alertmanager-slack.md) + +... and more ... 
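For orientation, a minimal AlertManager route and Slack receiver has roughly the following shape; the webhook URL and channel are placeholders, and the Genestack-specific override layout is covered in the linked Slack Alerts example:

``` yaml
route:
  receiver: slack-notifications
  group_by: ['alertname', 'severity']
receivers:
  - name: slack-notifications
    slack_configs:
      # Placeholder webhook URL and channel; replace with values for your workspace.
      - api_url: https://hooks.slack.com/services/REPLACE/WITH/WEBHOOK
        channel: '#alerts'
        send_resolved: true
```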
+ +### Update alerting rules + +Within the genestack repo we can update our alerting rules via the alerting_rules.yaml to fit our needs + +View alerting_rules.yaml in: + +``` shell +less /opt/genestack/kustomize/prometheus/alerting_rules.yaml +``` diff --git a/docs/openstack-ceilometer.md b/docs/openstack-ceilometer.md new file mode 100644 index 00000000..9d67fb4f --- /dev/null +++ b/docs/openstack-ceilometer.md @@ -0,0 +1,80 @@ +# Deploy Ceilometer + +## Create Secrets + +``` shell +kubectl --namespace openstack create secret generic ceilometer-keystone-admin-password \ + --type Opaque \ + --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" +kubectl --namespace openstack create secret generic ceilometer-keystone-test-password \ + --type Opaque \ + --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" +kubectl --namespace openstack create secret generic ceilometer-rabbitmq-password \ + --type Opaque \ + --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" +``` + +## Run the package deployment + +``` shell +cd /opt/genestack/submodules/openstack-helm +helm upgrade --install ceilometer ./ceilometer \ + --namespace=openstack \ + --wait \ + --timeout 10m \ + -f /opt/genestack/helm-configs/ceilometer/ceilometer-helm-overrides.yaml \ + --set endpoints.identity.auth.admin.password="$(kubectl --namespace openstack get secret keystone-admin -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.identity.auth.ceilometer.password="$(kubectl --namespace openstack get secret ceilometer-keystone-admin-password -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.identity.auth.test.password="$(kubectl --namespace openstack get secret ceilometer-keystone-test-password -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.oslo_messaging.auth.admin.username="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.username}' | base64 -d)" \ + --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \ + --set endpoints.oslo_messaging.auth.ceilometer.password="$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \ + --set conf.ceilometer.oslo_messaging_notifications.transport_url="\ +rabbit://ceilometer:$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/ceilometer"\ + --set conf.ceilometer.notification.messaging_urls.values="{\ +rabbit://ceilometer:$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/ceilometer,\ +rabbit://cinder:$(kubectl --namespace openstack get secret cinder-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/cinder,\ +rabbit://glance:$(kubectl --namespace openstack get secret glance-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/glance,\ +rabbit://heat:$(kubectl --namespace openstack get secret heat-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/heat,\ +rabbit://keystone:$(kubectl --namespace openstack get secret keystone-rabbitmq-password -o jsonpath='{.data.password}' | base64 
-d)@rabbitmq.openstack.svc.cluster.local:5672/keystone,\ +rabbit://neutron:$(kubectl --namespace openstack get secret neutron-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/neutron,\ +rabbit://nova:$(kubectl --namespace openstack get secret nova-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/nova}" +``` + +!!! tip + + In a production like environment you may need to include production specific files like the example variable file found in `helm-configs/prod-example-openstack-overrides.yaml`. + +## Verify Ceilometer Workers + +As there is no Ceilometer API, we will do a quick validation against the +Gnocchi API via a series of `openstack metric` commands to confirm that +Ceilometer workers are ingesting metric and event data then persisting them +storage. + +### Verify metric resource types exist + +The Ceilomter db-sync job will create the various resource types in Gnocchi. +Without them, metrics can't be stored, so let's verify they exist. The +output should include named resource types and some attributes for resources +like `instance`, `instance_disk`, `network`, `volume`, etc. + +``` shell +kubectl exec -it openstack-admin-client -n openstack -- openstack metric resource-type list +``` + +### Verify metric resources + +Confirm that resources are populating in Gnocchi + +``` shell +kubectl exec -it openstack-admin-client -n openstack -- openstack metric resource list +``` + +### Verify metrics + +Confirm that metrics can be retrieved from Gnocchi + +``` shell +kubectl exec -it openstack-admin-client -n openstack -- openstack metric list +``` diff --git a/docs/openstack-cpu-allocation-ratio.md b/docs/openstack-cpu-allocation-ratio.md new file mode 100644 index 00000000..2b0080b9 --- /dev/null +++ b/docs/openstack-cpu-allocation-ratio.md @@ -0,0 +1,53 @@ +# Nova CPU allocation Guide + +By default openstack helm provide cpu allocation of 3:1. For a production deployment, cpu allocation ratio need to be decided based on multiple factors like: + +1. Workload type: Different workloads have different CPU utilization patterens. For example, web servers might have bursty utilization, while database might have more consistent. +2. Peformance Requirments: Consider the performace requirment of the workloads. Some applications may require dedicated CPU resource to meet required performace SLA, whereas other can share resources. +3. Overhead: Account for the overhead introduced by the operating system, hypervidor and virtulization layer. Sometime compute node are used as hyperconserved nodes as well. This can impact the effective allocation ratio. +4. Peak vs Average Usage: Determin whether to set allocation ratios based on peak or average CPU usage. Peak usages ensure there are enough resources available durig period of high demand, but it may result in underutilization during off-peak hours. +5. Growth and Scalability: Consider future growth and scalability needs when setting CPU allocation ratios. Allocating too liberally may result in wasted resources while allocating too conservatively may lead to resource shortage as the deployment scale. + +Lets consider below two use case to calculate CPU allocation for our deployment with HPE DL380 Server. + +### Case 1: CPU allocation ratio for shared CPU + +Workload type: Considering a flavor with 8 vCPU for workload which will meet its peak demand and required performace. 
+ +Max VM per host: Considering max of 60 VM of such flavor can be hosted on a single hypervisor as per our scaling/growth forcast. + +CPUs on hypervisor: HPE DL380 have 72 PCPU. + + +Example : +``` shell + Total physical CPU (PCPU) = 72 + No. of vCPU per flavor (VCPU) = 8 + No. of Instance per hypervisor (VM) = 60 + Overhead on CPU (OCPU) = 8 + Formula to calculate CPU allocation ratio: + + CAR = VM * VCPU / (PCPU - OPCU) + CAR = 60 * 8 / (72 - 8) + = 480/64 + = ~8 +``` +So here we get approx CPU allocation ratio of 8.1. + +### Case 2: Shared workload with CPU pining: + +There may be requirement to run CPU pinned VM along with floating instances (shared cpus). In such case CPU allocation for compute node will be different from rest of nodes. Lets see how to get cpu allocation for such type of compute nodes: + +Example : +``` shell + No. of CPU dedicated for CPU pinning (RCPUP) : 16 + CPU allocation ratio: + + CAR = VM * VCPU / (PCPU - RCPUP - OCPU) + CAR = 60 * 8 / (72 - 16 - 8) + = 480/48 + = 10 +``` +So, here cpu allocation will be 10.1 on host hosting cpu pinned instances and floating instances. + +Please note , above is an example only. For your use case it is required to considering flavor's CPU specifications based on application benchmark requirments, its peak utilization and scaling needs of future. diff --git a/docs/openstack-floating-ips.md b/docs/openstack-floating-ips.md index d8cc37e9..37dc0fea 100644 --- a/docs/openstack-floating-ips.md +++ b/docs/openstack-floating-ips.md @@ -1,11 +1,11 @@ # Openstack Floating Ips -To read more about Openstack Floating Ips using the [upstream docs](https://docs.openstack.org/python-openstackclient/pike/cli/command-objects/floating-ip.html). +To read more about Openstack Floating Ips using the [upstream docs](https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/floating-ip.html). #### List and view floating ips ``` shell -$ openstack floating ip list +openstack floating ip list [--network ] [--port ] [--fixed-ip-address ] @@ -18,7 +18,7 @@ $ openstack floating ip list #### Create a floating ip ``` shell -$ openstack floating ip create +openstack floating ip create [--subnet ] [--port ] [--floating-ip-address ] @@ -36,7 +36,7 @@ $ openstack floating ip create ``` shell -$ openstack floating ip delete [ ...] +openstack floating ip delete [ ...] ``` #### Floating ip set @@ -44,7 +44,7 @@ $ openstack floating ip delete [ ...] Set floating IP properties ``` shell -$ openstack floating ip set +openstack floating ip set --port [--fixed-ip-address ] @@ -53,13 +53,13 @@ $ openstack floating ip set #### Display floating ip details ``` shell -$ openstack floating ip show +openstack floating ip show ``` #### Unset floating IP Properties ``` shell -$ openstack floating ip unset +openstack floating ip unset --port ``` @@ -71,7 +71,7 @@ You can assign a floating IP address to a project and to an instance. 
Associate an IP address with an instance in the project, as follows: ``` shell -$ openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS +openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS ``` #### Disassociate floating IP addresses @@ -79,10 +79,10 @@ $ openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS To disassociate a floating IP address from an instance: ``` shell -$ openstack server remove floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS +openstack server remove floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS ``` To remove the floating IP address from a project: ``` shell -$ openstack floating ip delete FLOATING_IP_ADDRESS +openstack floating ip delete FLOATING_IP_ADDRESS ``` diff --git a/docs/openstack-gnocchi.md b/docs/openstack-gnocchi.md index 7f28dbb8..3961d0ee 100644 --- a/docs/openstack-gnocchi.md +++ b/docs/openstack-gnocchi.md @@ -2,7 +2,7 @@ ## Create Secrets -```shell +``` shell kubectl --namespace openstack create secret generic gnocchi-admin \ --type Opaque \ --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" @@ -22,7 +22,7 @@ options for ceph. The below simply creates the expected `ceph-etc` ConfigMap with the ceph.conf needed by Gnocchi to establish a connection to the mon host(s) via the rados client. -```shell +``` shell kubectl apply -n openstack -f - <] + [--ip ] + [--ip6 ] + [--name ] + [--instance-name ] + [--status ] + [--flavor ] + [--image ] + [--host ] + [--all-projects] + [--project ] + [--project-domain ] + [--user ] + [--user-domain ] + [--long] + [-n] + [--marker ] + [--limit ] + [--deleted] + [--changes-since ] +``` + +#### Create a new server + +``` shell +openstack server create + (--image | --volume ) + --flavor + [--security-group ] + [--key-name ] + [--property ] + [--file ] + [--user-data ] + [--availability-zone ] + [--block-device-mapping ] + [--nic ] + [--network ] + [--port ] + [--hint ] + [--config-drive |True] + [--min ] + [--max ] + [--wait] + +``` + +#### Delete a server + +``` shell +openstack server delete [--wait] [ ...] +``` + +# Launch a server from a snapshot + +#### Create a snapshot of the instance + +!!! note + + If necessary, list the instances to view the instance name with the list server command above. + +1. Shut down the source VM before you take the snapshot to ensure that all data is flushed to disk. Use the openstack server stop command to shut down the instance: + + ``` shell + openstack server stop myInstance + ``` + +2. Use the openstack server list command to confirm that the instance shows a SHUTOFF status. + +3. Use the openstack server image create command to take a snapshot: + + ``` shell + openstack server image create myInstance --name myInstanceSnapshot + ``` + + The above command creates the image myInstance by taking a snapshot of a running server. + +4. Use the openstack image list command to check the status until the status is active: + + ``` shell + openstack image list + ``` + +#### Download the snapshot + +!!! note + + Get the image id from the image list command (seen above). + +Download the snapshot by using the image ID: + +``` shell +openstack image save --file snapshot.raw {Image ID} +``` + +Make the image available to the new environment, either through HTTP or direct upload to a machine (scp). 
+ +#### Import the snapshot to the new env + +In the new project or cloud environment, import the snapshot: + +``` shell +openstack image create NEW_IMAGE_NAME \ + --container-format bare --disk-format qcow2 --file IMAGE_URL +``` + +#### Boot a new sever from the snapshot + +In the new project or cloud environment, use the snapshot to create the new instance: + +``` shell +openstack server create --flavor m1.tiny --image myInstanceSnapshot myNewInstance +``` + +# Launch a server from a volume + +#### Boot instance from volume + +You can create a bootable volume from an existing image, volume, or snapshot. This procedure shows you how to create a volume from an image and use the volume to boot an instance. + +1. List available images, noting the ID of the image that you wish to use. + + ``` shell + openstack image list + ``` + +2. Create a bootable volume from the chosen image. + + ``` shell + openstack volume create \ + --image {Image ID} --size 10 \ + test-volume + ``` + +3. Create a server, specifying the volume as the boot device. + + ``` shell + openstack server create \ + --flavor $FLAVOR --network $NETWORK \ + --volume {Volume ID}\ + --wait test-server + ``` + +4. List volumes once again to ensure the status has changed to in-use and the volume is correctly reporting the attachment. + + ``` shell + openstack volume list + ``` + + ``` shell + openstack server volume list test-server + ``` diff --git a/docs/openstack-skyline.md b/docs/openstack-skyline.md index 761dcd6e..588bff41 100644 --- a/docs/openstack-skyline.md +++ b/docs/openstack-skyline.md @@ -17,7 +17,7 @@ kubectl --namespace openstack \ --from-literal=service-domain="service" \ --from-literal=service-project="service" \ --from-literal=service-project-domain="service" \ - --from-literal=db-endpoint="mariadb-galera-primary.openstack.svc.cluster.local" \ + --from-literal=db-endpoint="maxscale-galera.openstack.svc.cluster.local" \ --from-literal=db-name="skyline" \ --from-literal=db-username="skyline" \ --from-literal=db-password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" \ diff --git a/docs/prometheus-monitoring-overview.md b/docs/prometheus-monitoring-overview.md index 3fbebdf8..4d0ad6ed 100644 --- a/docs/prometheus-monitoring-overview.md +++ b/docs/prometheus-monitoring-overview.md @@ -1,8 +1,29 @@ # Prometheus Monitoring Overview -Genestack utilizes Prometheus for monitoring and metrics collection. To read more about Prometheus please take a look at the [upstream docs](https://prometheus.io). +Genestack utilizes Prometheus for monitoring, alerting and metrics collection. To read more about Prometheus please take a look at the [upstream docs](https://prometheus.io). -A high level visual of Prometheus and the various monitoring and alerting components inside Genestack +Components used to monitor and provide alerting and visualization mechanisms for genestack include: -![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png) +* Prometheus +* AlertManager +* Grafana + +Prometheus makes use of various metric exporters used to collect monitoring data related to specific services: + +* Node Exporter(Hardware metrics) +* Kube State Exporter(Kubernetes cluster metrics) +* Mysql Exporter(MariaDB/Galera metrics) +* RabbitMQ Exporter(RabbitMQ queue metrics) +* Postgres Exporter(Postgresql metrics) +* Memcached Exporter(Memcached metrics) +* Openstack Exporter(Metrics from various Openstack products) + +
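To confirm the exporters are being scraped, one option is to port-forward the Prometheus service (the service name and namespace below match the kube-prometheus-stack endpoints referenced elsewhere in these docs) and review http://localhost:9090/targets in a browser:

``` shell
kubectl -n prometheus port-forward svc/kube-prometheus-stack-prometheus 9090:9090
```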
<figure markdown>
  ![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png){ style="filter:drop-shadow(#3c3c3c 0.5rem 0.5rem 10px);" }
  <figcaption>high level visual of Prometheus and the various monitoring and alerting components within genestack</figcaption>
</figure>
+ +### Getting started with genestack monitoring + +To get started using monitoring within the genestack ecosystem begin with the [getting started](monitoring-getting-started.md) page diff --git a/docs/prometheus-postgres-exporter.md b/docs/prometheus-postgres-exporter.md new file mode 100644 index 00000000..d765821b --- /dev/null +++ b/docs/prometheus-postgres-exporter.md @@ -0,0 +1,18 @@ +# PostgresSQL Exporter + +PostgresSQL Exporter is used to expose metrics from a running PostgresSQL deployment. + +!!! note + + To deploy metric exporters you will first need to deploy the Prometheus Operator, see: ([Deploy Prometheus](prometheus.md)). + +## Installation + +Install the PostgresSQL Exporter + +``` shell +kubectl kustomize --enable-helm /opt/genestack/kustomize/prometheus-postgres-exporter | kubectl -n openstack apply -f - +``` + +!!! success + If the installation is successful, you should see the exporter pod in the openstack namespace. diff --git a/docs/prometheus-rabbitmq-exporter.md b/docs/prometheus-rabbitmq-exporter.md index bcec5324..1c2479b5 100644 --- a/docs/prometheus-rabbitmq-exporter.md +++ b/docs/prometheus-rabbitmq-exporter.md @@ -11,8 +11,7 @@ RabbitMQ Exporter is used to expose metrics from a running RabbitMQ deployment. Install the RabbitMQ Exporter ``` shell -kubectl kustomize --enable-helm /opt/genestack/kustomize/prometheus-rabbitmq-exporter | \ - kubectl -n openstack apply --server-side -f - +kubectl kustomize --enable-helm /opt/genestack/kustomize/prometheus-rabbitmq-exporter | kubectl -n openstack apply --server-side -f - ``` !!! success diff --git a/docs/sealed-secrets.md b/docs/sealed-secrets.md new file mode 100644 index 00000000..e46d4dae --- /dev/null +++ b/docs/sealed-secrets.md @@ -0,0 +1,76 @@ +# Sealed Secrets Introduction and Installation Guide + + +Sealed Secrets is a Kubernetes-native solution for securely storing and managing sensitive information within Kubernetes Secrets. It ensures secure secret management by encrypting Kubernetes Secrets and storing them as SealedSecret resources, which can only be decrypted by the cluster itself. + +Sealed Secrets utilizes public-key cryptography to encrypt secrets, enabling safe storage in your version control system. + + +## Installation + +``` shell +cd kustomize/sealed-secrets/base +``` + +- Modify the `values.yaml` file with your desired configurations. Refer to the sample configuration in this directory, already updated for installation. + +``` shell +vi values.yaml +``` + +- Perform the installation: + +``` shell +kubectl kustomize . --enable-helm | kubectl apply -f - +``` + +!!! note + Ensure to take a backup of the `sealed-secrets-keyxxxx` Kubernetes Secret from the sealed-secrets namespace, as it will be required for the restoration process if needed. + +``` +kubectl get secret -n sealed-secrets -l sealedsecrets.bitnami.com/sealed-secrets-key=active -o yaml > sealed-secrets-key.yaml +``` + +## Usage Example: +In this example, we will use Sealed Secrets to encrypt a Grafana certificate from Kubernetes Secret yaml file. 
+ +### Encrypting Kubernetes Secret: +- Kubernetes Secret yaml file containing Grafana certificate: +``` +# cat grafana-cert.yaml +apiVersion: v1 +data: + ca.crt: + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjVENDQVJhZ0F3SUJBZ0lRYjBYbHp2d3JIWTd0MjNBREJ5Y2NnekFLQmdncWhrak9QUVFEQWpBWU1SWXcKRkFZRFZRUURFdzF5WVdOcmMzQmhZMlV1WTI5dE1CNFhEVEkwTURJeU5ERXdOVFExT0ZvWERUTTBNREl5TVRFdwpOVFExT0Zvd0dERVdNQlFHQTFVRUF4TU5jbUZqYTNOd1lXTmxMbU52YlRCWk1CTUdCeXFHU000OUFnRUdDQ3FHClNNNDlBd0VIQTBJQUJPd0owMU1ZTWw4MUNyV1dMODlQQkhvVG5telZCT2xRMkdMMDFTd2JjYXZQVmRCWnVHamIKeFlwR3VKVDd1UG5xdVp4eFZ4djhUSFlPcVVVL1ZYT2ZtdkNqUWpCQU1BNEdBMVVkRHdFQi93UUVBd0lDcERBUApCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUU5weXZnNk1CSWFnZENuOVR1ejZ3SkZDMVIvekFLCkJnZ3Foa2pPUFFRREFnTkpBREJHQWlFQTY5T25ScUZ5SHZQbjJkWFZ6YjBTVFRZY2UxUUZGUEphWXFVYnQrc2kKdG13Q0lRRDE2ODV0UDBKcnZRRnB6NVlPNFdYQ2xEQWxabTgxUWRwN1lWY0FJS1RhbWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNUVENDQWZLZ0F3SUJBZ0lSQUxieTRuVUJoTWlvYkVTS01yVmwrbEl3Q2dZSUtvWkl6ajBFQXdJd0dERVcKTUJRR0ExVUVBeE1OY21GamEzTndZV05sTG1OdmJUQWVGdzB5TkRBek1UVXhNakk0TUROYUZ3MHlPVEF6TVRReApNakk0TUROYU1BQXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEUStvcVhlUVZWCmRSWkFWclM2ekZwMDlONXpDWUJRcS9HRjNNS1NyWnNkK3VNVlFXakIwcXlJcWJRdm9kL0N0NFhMdWx3a3UyWkIKQlg1MFN4NHJMVGhKQ3ExY2VIQ3lnRUZRa1gyekl6dlBkaCtTcFhWUnhMdzhHZW1ramZ5R3VXeVdydkVEa1cxKwpaM0dYOFc0ZzRZVkwyUEhSLzBIOWxSaVVhK2lYMmM0ZkJhVWoyTUQ3bkF6eWRKaEpneU5rQVZqUHFkRGpGay90CmdIS3pDTGhRTjd0d083ZzluU1UwdTJ1aWI4Z0FZeng0aHl1SWtwR3dCL3JNQkFWb0pxV3Y5eFFkVWd2S2w4a0EKbDFydngwaFlveWZETUprWVQ3SkFYZExEWTJRTUNyY0Y3d0poQUMzYThhYXJqRlUwWXFiQ0Z4TCtvRGw3OGxDbwp2akt2NG0wUmliU1ZBZ01CQUFHamFqQm9NQTRHQTFVZER3RUIvd1FFQXdJRm9EQU1CZ05WSFJNQkFmOEVBakFBCk1COEdBMVVkSXdRWU1CYUFGQTJuSytEb3dFaHFCMEtmMU83UHJBa1VMVkgvTUNjR0ExVWRFUUVCL3dRZE1CdUMKR1dkeVlXWmhibUV0YkdGaUxtUmxiVzh1YldzNGN5NXVaWFF3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQU9lRwp4d1l0S1ZUTjVMcmpwbGR6YlVOLzQ3NnFqM0t4NXdZcGlCL0VaalY5QWlFQXRHU3ZJZlJ2R0JGY1lqaWRyNFl1Ckw1S0Rwd21rZkt0eFhuNi9xamF0eG1jPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.key: + 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMFBxS2wza0ZWWFVXUUZhMHVzeGFkUFRlY3dtQVVLdnhoZHpDa3EyYkhmcmpGVUZvCndkS3NpS20wTDZIZndyZUZ5N3BjSkx0bVFRVitkRXNlS3kwNFNRcXRYSGh3c29CQlVKRjlzeU03ejNZZmtxVjEKVWNTOFBCbnBwSTM4aHJsc2xxN3hBNUZ0Zm1keGwvRnVJT0dGUzlqeDBmOUIvWlVZbEd2b2w5bk9Id1dsSTlqQQorNXdNOG5TWVNZTWpaQUZZejZuUTR4WlA3WUJ5c3dpNFVEZTdjRHU0UFowbE5MdHJvbS9JQUdNOGVJY3JpSktSCnNBZjZ6QVFGYUNhbHIvY1VIVklMeXBmSkFKZGE3OGRJV0tNbnd6Q1pHRSt5UUYzU3cyTmtEQXEzQmU4Q1lRQXQKMnZHbXE0eFZOR0ttd2hjUy9xQTVlL0pRcUw0eXIrSnRFWW0wbFFJREFRQUJBb0lCQVFDR2x0VnJlS1hXdy9Idwp2ZWJuNTNUYW5sb2wvSmlIWERYUTRMenZlcC9NVHlpeEo4OHdCVjdaSlhMR3VwcEI3YkJkNVVneTMvNmJJYzZ2ClZ6RzIzUWpEQWYxazhLeWtTYlhIRGV6RzBvcFNzdURpc1cwOW5GY2UzaEY3eVhZNXpuSUJHZXBmUWVvaTNyeHAKL3pQT09YQi95TmoxUmxCWjRReFRpcXZpSUlSL3RSZmNQcFp2RWFRRHo5RDBwcm5VTG5raXdqZ1FsUVhnWXdITwpFYjRBZTlwaWwzZ3plNnVoeGxOWEc3bE1nYjFoOHZFa0RNOURJK0tqd25tYjF3eEZCSkZEQ2E4dm15ZDZZTThRCnU1bU5JbVc3bmh1bTA3akRid0tXSDgySE5kTWEwT2g4T0RCWENSSkVhMTZ2YXd0NVNCWjJLcVdlbmpaTlUycmwKTzJ2UmRZUUJBb0dCQVAxUzhEeTVWRkVQUHB4RCtLZHJzWGlVcER6Rzl2VGZmS3ZLQ2NBNExpVEJNYTdEdlRNTwpMeFRJaldMekhmZUFPbXBzVngrK3U4S1kzd2txbTBDcWpabzZ3eVpXcWZhZkJ6bUluK3p3Zm9tQmlIazJwZ2tCCjlTdU95VW9Bb0djYSt6TUtyZXpJRjVrc2FaUmlJbERsL2dheWFlVUZyWGhLZUJTbDF0Q3lOVTlOQW9HQkFOTXYKcmkxcllLZkVPeGxOTlpTbVczQzRiZ2RJZlNoRXNYaVcrUkxFYkdqamgwRWN5Vy92SCtrMU5TdU5aZERpUk9BRwpVQmhmT29YSnVYbzJkTlRXdXFuSE9QL2pxUG1tYWRhU3dpejNtUFNqRUppU3hUbFBQMGEyb0Jpa3VTVlAybDFVCkxxa0MrZ1ZEWHhoaXlXUXlKMUNnY0dNb0IyTVI4R0RaZkVXSm9lWnBBb0dCQU9EdjBWUUtPRjFWelFHU3RXdHMKREFVRzc2THNCUU5Bb3dJamYyOElNNmo5UnpGb3EwcDNjTVRpby9EVjhha0FXbDUvWHdsWUluN1RvVkFSWGhRWQpuVzN5ZWJCRVNkMHNMbzBlek9ybVRXV3ArRld4ZWRNTHd2aHZiRHJpdll0d0FOZTh4dDAyZXdYTzB0MG9HbEo5Ck5vZ1p5ai9MUDlKTlJiMEgyT3d0SVhzTkFvR0FNaXRrbEhPcTNaQVhmaFpDZ1ZMWDdEcFVJVFRPVHMrcTNYdjQKSmNZMS91RDJrN2hUL2x4dlYwYUZvQmdTTlFKYjNHQ0RqSmFxMzNlaHNXL1laMnV2b24rcWdkZkNuN1F4OW9DYwowblByaVVwbnVlYzhKVFkzVVFRM21rTWZuTWFRbUpWVUZHQ1pwc0J2aWVxRjcyQ2V5RitrODFsaUQ5NEdIZXZzCnd0UkVldWtDZ1lFQSt1ZExMZllCRitFaDZIVldvT3NXU2lqZCtrTnh4ajhaK2VSMWhOaWxtN1I5RlNkVzJHVEoKY2lvMlIrSDhWU0xudnFjZ29oWXNxZ0N0VXViTnpNbjdlbEt4RkNOOHRaS1lUYnhHcU5IUHJ4WE43M3RQNy83WAp2MWF4UXQvbm5lcDEvaVYzODVBcUZLdGZ6UU9Ua25sdGJBcmxyZzRvRFk4d0NtUmcwTi9aLzJFPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + annotations: + cert-manager.io/alt-names: grafana-lab.demo.mk8s.net + name: grafana + namespace: rackspace-system +type: kubernetes.io/tls +``` +- Download [kubeseal](https://github.com/bitnami-labs/sealed-secrets/releases) binary. +- Use `kubeseal` for the Kuberntes Secret entryption: +``` shell +kubeseal --scope cluster-wide --allow-empty-data -o yaml --controller-namespace rackspace-system < ~/grafana-cert.yaml > encrypted_grafana-cert.yaml +cat encrypted_grafana-cert.yaml +``` +For more options around `kubeseal` please check help page. + +- Upload the encrypted Sealed Secret resource(`encrypted_grafana-cert.yaml`) to your version control system. It can only be decrypted using the secret created during the Sealed Secrets installation. 
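For reference, the generated `encrypted_grafana-cert.yaml` is a `SealedSecret` resource in which each data key holds ciphertext that only the in-cluster controller can decrypt. The abbreviated sketch below is illustrative only; the encrypted values are truncated placeholders and the exact metadata written by `kubeseal` may differ:

``` yaml
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: grafana
  namespace: rackspace-system
  annotations:
    sealedsecrets.bitnami.com/cluster-wide: "true"
spec:
  encryptedData:
    ca.crt: AgBy...   # ciphertext placeholder
    tls.crt: AgCk...  # ciphertext placeholder
    tls.key: AgD3...  # ciphertext placeholder
  template:
    metadata:
      name: grafana
      namespace: rackspace-system
    type: kubernetes.io/tls
```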
+ +### Deploying Kubernetes Secret from Sealed Secret Resource: +- Apply sealed-secret resource(`encrypted_grafana-cert.yaml`): +```shell +kubectl apply -f encrypted_grafana-cert.yaml +``` +- Verify that the Sealed Secret has been created and the Kubernetes Secret has been decrypted: +```shell +kubectl get sealedsecret/grafana -n rackspace-system +kubectl get secret grafana -n rackspace-system +``` diff --git a/docs/vault-secrets-operator.md b/docs/vault-secrets-operator.md index 8464d971..8749186f 100644 --- a/docs/vault-secrets-operator.md +++ b/docs/vault-secrets-operator.md @@ -4,139 +4,163 @@ The Vault Secrets Operator (VSO) enables Pods to seamlessly consume Vault secret ## Prerequisites -Before starting the installation, ensure the following prerequisites are met: -- **HashiCorp Vault:** Ensure HashiCorp Vault is installed in the cluster. You can refer [vault.md](https://github.com/rackerlabs/genestack/blob/main/docs/vault.md) for more details. +!!! note "Before starting the installation, ensure the following prerequisites are met" + + **HashiCorp Vault:** Ensure HashiCorp Vault is installed in the cluster. You can refer [vault.md](https://github.com/rackerlabs/genestack/blob/main/docs/vault.md) for more details. ## Installation -- Navigate to the Vault Secrets Operator base directory: - ``` shell - cd kustomize/vault-secrets-operator/base - ``` -- Modify the `values.yaml` file with your desired configurations. Refer to the sample configuration in this directory, already updated for installation. - ``` shell - vi values.yaml - ``` +Navigate to the Vault Secrets Operator base directory: + +``` shell +cd kustomize/vault-secrets-operator/base +``` + +Modify the `values.yaml` file with your desired configurations. Refer to the sample configuration in this directory, already updated for installation. + +``` shell +vi values.yaml +``` + +Perform the installation. -- Perform the installation: - ``` shell - kustomize build . --enable-helm | kubectl apply -f - - ``` +``` shell +kustomize build . --enable-helm | kubectl apply -f - +``` ## Consume secrets from the Vault + After installing the `vault-secrets-operator`, create the necessary resources to consume secrets stored in Vault. ### Connect to the vault -- Create a `VaultConnection` resource to establish a connection to Vault: - ``` - apiVersion: secrets.hashicorp.com/v1beta1 - kind: VaultConnection - metadata: - namespace: openstack - name: vault-connection - spec: - # required configuration - # address to the Vault server. - address: https://vault.vault.svc.cluster.local:8200 - - # optional configuration - # HTTP headers to be included in all Vault requests. - # headers: [] - # TLS server name to use as the SNI host for TLS connections. - # tlsServerName: "" - # skip TLS verification for TLS connections to Vault. - skipTLSVerify: false - # the trusted PEM encoded CA certificate chain stored in a Kubernetes Secret - caCertSecretRef: "vault-ca-secret" - ``` - `vault-ca-secret`: CA certificate used to sign the Vault certificate for internal communication. + +Create a `VaultConnection` resource to establish a connection to Vault. + +``` yaml +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultConnection +metadata: +namespace: openstack +name: vault-connection +spec: +# required configuration +# address to the Vault server. +address: https://vault.vault.svc.cluster.local:8200 + +# optional configuration +# HTTP headers to be included in all Vault requests. +# headers: [] +# TLS server name to use as the SNI host for TLS connections. 
+# tlsServerName: "" +# skip TLS verification for TLS connections to Vault. +skipTLSVerify: false +# the trusted PEM encoded CA certificate chain stored in a Kubernetes Secret +caCertSecretRef: "vault-ca-secret" +``` + +`vault-ca-secret`: CA certificate used to sign the Vault certificate for internal communication. ### Authenticate with vault: -- Create a `VaultAuth` resource to authenticate with Vault and access secrets: - ``` - apiVersion: secrets.hashicorp.com/v1beta1 - kind: VaultAuth - metadata: - name: keystone-auth - namespace: openstack - spec: - method: kubernetes - mount: genestack - kubernetes: - role: osh - serviceAccount: default - audiences: - - vault - vaultConnectionRef: vault-connection - ``` + +Create a `VaultAuth` resource to authenticate with Vault and access secrets. + +``` yaml +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: +name: keystone-auth +namespace: openstack +spec: +method: kubernetes +mount: genestack +kubernetes: + role: osh + serviceAccount: default + audiences: + - vault +vaultConnectionRef: vault-connection +``` ### Create Vault static: -- Define a `VaultStaticSecret` resource to fetch a secret from Vault and create a Kubernetes Secret resource: - ``` - apiVersion: secrets.hashicorp.com/v1beta1 - kind: VaultStaticSecret - metadata: - name: keystone-rabbitmq-password - namespace: openstack - spec: - type: kv-v2 - # mount path - mount: 'osh/keystone' +Define a `VaultStaticSecret` resource to fetch a secret from Vault and create a Kubernetes Secret resource. - # path of the secret - path: keystone-rabbitmq-password +``` yaml +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: +name: keystone-rabbitmq-password +namespace: openstack +spec: +type: kv-v2 - # dest k8s secret - destination: - name: keystone-rabbitmq-password - create: true +# mount path +mount: 'osh/keystone' - # static secret refresh interval - refreshAfter: 30s +# path of the secret +path: keystone-rabbitmq-password - # Name of the CRD to authenticate to Vault - vaultAuthRef: keystone-auth - ``` - This `VaultStaticSecret` resource fetches the `keystone-rabbitmq-password` secret from Vault and creates a Kubernetes Secret named `keystone-rabbitmq-password` in the openstack namespace which you can further use in the Genestack running on Kubernetes. 
-## Example usage:
-```
-# From Vault:
-/ $ vault kv get osh/keystone/keystone-rabbitmq-password
-================ Secret Path ================
-osh/keystone/data/keystone-rabbitmq-password
-
-======= Metadata =======
-Key                Value
----                -----
-created_time       2024-02-21T12:13:20.961200482Z
-custom_metadata
-deletion_time      n/a
-destroyed          false
-version            1
-
-====== Data ======
-Key         Value
----         -----
-password    EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr
-
-# From Kubernetes:
-$ kubectl apply -f vaultconnection.yaml
-$ kubectl apply -f vault-auth.yaml
-$ kubectl apply -f keystone-rabbitmq-password-vault.yaml
-
-$ kubectl get secret keystone-rabbitmq-password -n openstack
-NAME                         TYPE     DATA   AGE
-keystone-rabbitmq-password   Opaque   2      14h
-
-$ kubectl get secret keystone-rabbitmq-password -n openstack -o yaml
-apiVersion: v1
-data:
-  _raw: eyJkYXRhIjp7InBhc3N3b3JkIjoiRUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpyciJ9LCJtZXRhZGF0YSI6eyJjcmVhdGVkX3RpbWUiOiIyMDI0LTAyLTIxVDEyOjEzOjIwLjk2MTIwMDQ4MloiLCJjdXN0b21fbWV0YWRhdGEiOm51bGwsImRlbGV0aW9uX3RpbWUiOiIiLCJkZXN0cm95ZWQiOmZhbHNlLCJ2ZXJzaW9uIjoxfX0=
-  password: RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg==
-kind: Secret
-[...]
-
-$ echo "RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg==" |base64 -d
-EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr
+  # dest k8s secret
+  destination:
+    name: keystone-rabbitmq-password
+    create: true
+
+  # static secret refresh interval
+  refreshAfter: 30s
+
+  # Name of the CRD to authenticate to Vault
+  vaultAuthRef: keystone-auth
+```
+
+This `VaultStaticSecret` resource fetches the `keystone-rabbitmq-password` secret from Vault and creates a Kubernetes Secret named `keystone-rabbitmq-password` in the `openstack` namespace, which you can then use with Genestack services running on Kubernetes.
+
+!!! example "Example usage workflow"

+    ``` shell
+    # From Vault:
+    vault kv get osh/keystone/keystone-rabbitmq-password
+    ================ Secret Path ================
+    osh/keystone/data/keystone-rabbitmq-password
+
+    ======= Metadata =======
+    Key                Value
+    ---                -----
+    created_time       2024-02-21T12:13:20.961200482Z
+    custom_metadata
+    deletion_time      n/a
+    destroyed          false
+    version            1
+
+    ====== Data ======
+    Key         Value
+    ---         -----
+    password    EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr
+    ```
+
+    Apply the required configuration files.
+
+    ``` shell
+    # From Kubernetes:
+    kubectl apply -f vaultconnection.yaml
+    kubectl apply -f vault-auth.yaml
+    kubectl apply -f keystone-rabbitmq-password-vault.yaml
+    ```
+
+    Retrieve the secret as YAML.
+
+    ``` shell
+    kubectl get secret keystone-rabbitmq-password -n openstack -o yaml
+    apiVersion: v1
+    data:
+      _raw: eyJkYXRhIjp7InBhc3N3b3JkIjoiRUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpyciJ9LCJtZXRhZGF0YSI6eyJjcmVhdGVkX3RpbWUiOiIyMDI0LTAyLTIxVDEyOjEzOjIwLjk2MTIwMDQ4MloiLCJjdXN0b21fbWV0YWRhdGEiOm51bGwsImRlbGV0aW9uX3RpbWUiOiIiLCJkZXN0cm95ZWQiOmZhbHNlLCJ2ZXJzaW9uIjoxfX0=
+      password: RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg==
+    kind: Secret
+    [...]
+    ```
+
+    Check the returned password.
+ + ``` shell + echo "RUVORjFTZktPVmtJTFRHVnpmdEpoZGo1QTZtd25iY0NMZ2R0dGFoaEtzUVZ4Q1dIcklyaGMwdGhlQ0czVHpycg==" | base64 -d + EENF1SfKOVkILTGVzftJhdj5A6mwnbcCLgdttahhKsQVxCWHrIrhc0theCG3Tzrr + ``` diff --git a/docs/vault.md b/docs/vault.md index 62b03496..dc911161 100644 --- a/docs/vault.md +++ b/docs/vault.md @@ -78,7 +78,7 @@ On each Vault pod (vault-1, vault-2), use any of the 2 unseal keys obtained duri ``` shell kubectl exec -it vault-1 -n vault -- vault operator unseal ``` -```shell +``` shell kubectl exec -it vault-2 -n vault -- vault operator unseal ``` diff --git a/helm-configs/ceilometer/ceilometer-helm-overrides.yaml b/helm-configs/ceilometer/ceilometer-helm-overrides.yaml new file mode 100644 index 00000000..952324d7 --- /dev/null +++ b/helm-configs/ceilometer/ceilometer-helm-overrides.yaml @@ -0,0 +1,2182 @@ +--- +release_group: null + +labels: + compute: + node_selector_key: openstack-compute-node + node_selector_value: enabled + central: + node_selector_key: openstack-control-plane + node_selector_value: enabled + ipmi: + node_selector_key: openstack-node + node_selector_value: enabled + notification: + node_selector_key: openstack-control-plane + node_selector_value: enabled + job: + node_selector_key: openstack-control-plane + node_selector_value: enabled + test: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + tags: + test: docker.io/xrally/xrally-openstack:2.0.0 + ceilometer_db_sync: docker.io/kolla/ubuntu-source-ceilometer-base:yoga + rabbit_init: docker.io/rabbitmq:3.7-management + ks_user: docker.io/openstackhelm/heat:wallaby-ubuntu_focal + ks_service: docker.io/openstackhelm/heat:wallaby-ubuntu_focal + ceilometer_central: docker.io/kolla/ubuntu-source-ceilometer-central:yoga + ceilometer_compute: docker.io/kolla/ubuntu-source-ceilometer-compute:yoga + ceilometer_ipmi: docker.io/kolla/ubuntu-source-ceilometer-base:yoga + ceilometer_notification: docker.io/kolla/ubuntu-source-ceilometer-notification:yoga + dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 + image_repo_sync: docker.io/docker:17.07.0 + pull_policy: "IfNotPresent" + local_registry: + active: false + exclude: + - dep_check + - image_repo_sync + +ipmi_device: /dev/ipmi0 + +conf: + ceilometer: + DEFAULT: + debug: "false" +# default_log_levels: >- +# amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO, +# oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=DEBUG, +# urllib3.connectionpool=DEBUG,websocket=WARN,requests.packages.urllib3.util.retry=DEBUG, +# urllib3.util.retry=DEBUG,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN, +# taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO + event_dispatchers: + type: multistring + values: + - gnocchi + meter_dispatchers: + type: multistring + values: + - gnocchi + api: + aodh_is_enabled: "False" + aodh_url: "NotUsed" + dispatcher_gnocchi: + filter_service_activity: False + archive_policy: low + resources_definition_file: /etc/ceilometer/gnocchi_resources.yaml + database: + connection: "NotUsed" + event_connection: "NotUsed" + metering_connection: "NotUsed" + max_retries: -1 + dispatcher: + archive_policy: low + filter_project: service + keystone_authtoken: + auth_type: password + auth_version: v3 + service_credentials: + auth_type: password + interface: internal + notification: + messaging_urls: + type: multistring + values: + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/ceilometer + 
- rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/cinder + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/glance + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/nova + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/keystone + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/neutron + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/heat + oslo_messaging_notifications: + driver: messagingv2 + topics: + - notifications + - profiler + oslo_policy: + policy_file: /etc/ceilometer/policy.yaml + cache: + enabled: true + backend: dogpile.cache.memcached + expiration_time: 86400 + event_definitions: + - event_type: 'compute.instance.*' + traits: &instance_traits + tenant_id: + fields: payload.tenant_id + user_id: + fields: payload.user_id + instance_id: + fields: payload.instance_id + display_name: + fields: payload.display_name + resource_id: + fields: payload.instance_id + cell_name: + fields: payload.cell_name + host: + fields: publisher_id.`split(., 1, 1)` + service: + fields: publisher_id.`split(., 0, -1)` + memory_mb: + type: int + fields: payload.memory_mb + disk_gb: + type: int + fields: payload.disk_gb + root_gb: + type: int + fields: payload.root_gb + ephemeral_gb: + type: int + fields: payload.ephemeral_gb + vcpus: + type: int + fields: payload.vcpus + instance_type_id: + fields: payload.instance_type_id + instance_type: + fields: payload.instance_type + state: + fields: payload.state + os_architecture: + fields: payload.image_meta.'org.openstack__1__architecture' + os_version: + fields: payload.image_meta.'org.openstack__1__os_version' + os_distro: + fields: payload.image_meta.'org.openstack__1__os_distro' + launched_at: + type: datetime + fields: payload.launched_at + deleted_at: + type: datetime + fields: payload.deleted_at + - event_type: compute.instance.create.end + traits: + <<: *instance_traits + availability_zone: + fields: payload.availability_zone + - event_type: compute.instance.update + traits: + <<: *instance_traits + old_state: + fields: payload.old_state + - event_type: compute.instance.exists + traits: + <<: *instance_traits + audit_period_beginning: + type: datetime + fields: payload.audit_period_beginning + audit_period_ending: + type: datetime + fields: payload.audit_period_ending + - event_type: ['volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*', 'volume.transfer.accept.end', 'snapshot.transfer.accept.end'] + traits: &cinder_traits + user_id: + fields: payload.user_id + project_id: + fields: payload.tenant_id + availability_zone: + fields: payload.availability_zone + display_name: + fields: payload.display_name + replication_status: + fields: payload.replication_status + status: + fields: payload.status + created_at: + type: datetime + fields: payload.created_at + image_id: + fields: payload.glance_metadata[?key=image_id].value + instance_id: + fields: payload.volume_attachment[0].server_id + - event_type: ['volume.transfer.*', 'volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.transfer.accept.end'] + traits: + <<: *cinder_traits + resource_id: + fields: payload.volume_id + host: + fields: payload.host + size: + type: int + fields: payload.size + 
type: + fields: payload.volume_type + replication_status: + fields: payload.replication_status + - event_type: ['snapshot.transfer.accept.end'] + traits: + <<: *cinder_traits + resource_id: + fields: payload.snapshot_id + project_id: + fields: payload.tenant_id + - event_type: ['share.create.*', 'share.delete.*', 'share.extend.*', 'share.shrink.*'] + traits: &share_traits + share_id: + fields: payload.share_id + user_id: + fields: payload.user_id + project_id: + fields: payload.tenant_id + snapshot_id: + fields: payload.snapshot_id + availability_zone: + fields: payload.availability_zone + status: + fields: payload.status + created_at: + type: datetime + fields: payload.created_at + share_group_id: + fields: payload.share_group_id + size: + type: int + fields: payload.size + name: + fields: payload.name + proto: + fields: payload.proto + is_public: + fields: payload.is_public + description: + fields: payload.description + host: + fields: payload.host + - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] + traits: + <<: *cinder_traits + resource_id: + fields: payload.snapshot_id + volume_id: + fields: payload.volume_id + - event_type: ['image_volume_cache.*'] + traits: + image_id: + fields: payload.image_id + host: + fields: payload.host + - event_type: ['image.create', 'image.update', 'image.upload', 'image.delete'] + traits: &glance_crud + project_id: + fields: payload.owner + resource_id: + fields: payload.id + name: + fields: payload.name + status: + fields: payload.status + created_at: + type: datetime + fields: payload.created_at + user_id: + fields: payload.owner + deleted_at: + type: datetime + fields: payload.deleted_at + size: + type: int + fields: payload.size + - event_type: image.send + traits: &glance_send + receiver_project: + fields: payload.receiver_tenant_id + receiver_user: + fields: payload.receiver_user_id + user_id: + fields: payload.owner_id + image_id: + fields: payload.image_id + destination_ip: + fields: payload.destination_ip + bytes_sent: + type: int + fields: payload.bytes_sent + - event_type: orchestration.stack.* + traits: &orchestration_crud + project_id: + fields: payload.tenant_id + user_id: + fields: ['ctxt.trustor_user_id', 'ctxt.user_id'] + resource_id: + fields: payload.stack_identity + name: + fields: payload.name + - event_type: sahara.cluster.* + traits: &sahara_crud + project_id: + fields: payload.project_id + user_id: + fields: ctxt.user_id + resource_id: + fields: payload.cluster_id + name: + fields: payload.name + - event_type: sahara.cluster.health + traits: &sahara_health + <<: *sahara_crud + verification_id: + fields: payload.verification_id + health_check_status: + fields: payload.health_check_status + health_check_name: + fields: payload.health_check_name + health_check_description: + fields: payload.health_check_description + created_at: + type: datetime + fields: payload.created_at + updated_at: + type: datetime + fields: payload.updated_at + - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', + 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] + traits: &identity_crud + resource_id: + fields: payload.resource_info + initiator_id: + fields: payload.initiator.id + project_id: + fields: payload.initiator.project_id + domain_id: + fields: payload.initiator.domain_id + - event_type: identity.role_assignment.* + traits: &identity_role_assignment + role: + fields: payload.role + group: + 
fields: payload.group + domain: + fields: payload.domain + user: + fields: payload.user + project: + fields: payload.project + - event_type: identity.authenticate + traits: &identity_authenticate + typeURI: + fields: payload.typeURI + id: + fields: payload.id + action: + fields: payload.action + eventType: + fields: payload.eventType + eventTime: + type: datetime + fields: payload.eventTime + outcome: + fields: payload.outcome + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_id: + fields: payload.initiator.id + initiator_name: + fields: payload.initiator.name + initiator_host_agent: + fields: payload.initiator.host.agent + initiator_host_addr: + fields: payload.initiator.host.address + target_typeURI: + fields: payload.target.typeURI + target_id: + fields: payload.target.id + observer_typeURI: + fields: payload.observer.typeURI + observer_id: + fields: payload.observer.id + - event_type: objectstore.http.request + traits: &objectstore_request + typeURI: + fields: payload.typeURI + id: + fields: payload.id + action: + fields: payload.action + eventType: + fields: payload.eventType + eventTime: + type: datetime + fields: payload.eventTime + outcome: + fields: payload.outcome + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_id: + fields: payload.initiator.id + initiator_project_id: + fields: payload.initiator.project_id + target_typeURI: + fields: payload.target.typeURI + target_id: + fields: payload.target.id + target_action: + fields: payload.target.action + target_metadata_path: + fields: payload.target.metadata.path + target_metadata_version: + fields: payload.target.metadata.version + target_metadata_container: + fields: payload.target.metadata.container + target_metadata_object: + fields: payload.target.metadata.object + observer_id: + fields: payload.observer.id + - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] + traits: &network_traits + user_id: + fields: ctxt.user_id + project_id: + fields: ctxt.tenant_id + - event_type: network.* + traits: + <<: *network_traits + name: + fields: payload.network.name + resource_id: + fields: ['payload.network.id', 'payload.id'] + - event_type: subnet.* + traits: + <<: *network_traits + name: + fields: payload.subnet.name + resource_id: + fields: ['payload.subnet.id', 'payload.id'] + - event_type: port.* + traits: + <<: *network_traits + name: + fields: payload.port.name + resource_id: + fields: ['payload.port.id', 'payload.id'] + - event_type: router.* + traits: + <<: *network_traits + name: + fields: payload.router.name + resource_id: + fields: ['payload.router.id', 'payload.id'] + - event_type: floatingip.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.floatingip.id', 'payload.id'] + - event_type: pool.* + traits: + <<: *network_traits + name: + fields: payload.pool.name + resource_id: + fields: ['payload.pool.id', 'payload.id'] + - event_type: vip.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.vip.id', 'payload.id'] + - event_type: member.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.member.id', 'payload.id'] + - event_type: health_monitor.* + traits: + <<: *network_traits + name: + fields: payload.health_monitor.name + resource_id: + fields: ['payload.health_monitor.id', 'payload.id'] 
+ - event_type: healthmonitor.* + traits: + <<: *network_traits + name: + fields: payload.healthmonitor.name + resource_id: + fields: ['payload.healthmonitor.id', 'payload.id'] + - event_type: listener.* + traits: + <<: *network_traits + name: + fields: payload.listener.name + resource_id: + fields: ['payload.listener.id', 'payload.id'] + - event_type: loadbalancer.* + traits: + <<: *network_traits + name: + fields: payload.loadbalancer.name + resource_id: + fields: ['payload.loadbalancer.id', 'payload.id'] + - event_type: firewall.* + traits: + <<: *network_traits + name: + fields: payload.firewall.name + resource_id: + fields: ['payload.firewall.id', 'payload.id'] + - event_type: firewall_policy.* + traits: + <<: *network_traits + name: + fields: payload.firewall_policy.name + resource_id: + fields: ['payload.firewall_policy.id', 'payload.id'] + - event_type: firewall_rule.* + traits: + <<: *network_traits + name: + fields: payload.firewall_rule.name + resource_id: + fields: ['payload.firewall_rule.id', 'payload.id'] + - event_type: vpnservice.* + traits: + <<: *network_traits + name: + fields: payload.vpnservice.name + resource_id: + fields: ['payload.vpnservice.id', 'payload.id'] + - event_type: ipsecpolicy.* + traits: + <<: *network_traits + name: + fields: payload.ipsecpolicy.name + resource_id: + fields: ['payload.ipsecpolicy.id', 'payload.id'] + - event_type: ikepolicy.* + traits: + <<: *network_traits + name: + fields: payload.ikepolicy.name + resource_id: + fields: ['payload.ikepolicy.id', 'payload.id'] + - event_type: ipsec_site_connection.* + traits: + <<: *network_traits + resource_id: + fields: ['payload.ipsec_site_connection.id', 'payload.id'] + - event_type: '*http.*' + traits: &http_audit + project_id: + fields: payload.initiator.project_id + user_id: + fields: payload.initiator.id + typeURI: + fields: payload.typeURI + eventType: + fields: payload.eventType + action: + fields: payload.action + outcome: + fields: payload.outcome + id: + fields: payload.id + eventTime: + type: datetime + fields: payload.eventTime + requestPath: + fields: payload.requestPath + observer_id: + fields: payload.observer.id + target_id: + fields: payload.target.id + target_typeURI: + fields: payload.target.typeURI + target_name: + fields: payload.target.name + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_id: + fields: payload.initiator.id + initiator_name: + fields: payload.initiator.name + initiator_host_address: + fields: payload.initiator.host.address + - event_type: '*http.response' + traits: + <<: *http_audit + reason_code: + fields: payload.reason.reasonCode + - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] + traits: &dns_domain_traits + status: + fields: payload.status + retry: + fields: payload.retry + description: + fields: payload.description + expire: + fields: payload.expire + email: + fields: payload.email + ttl: + fields: payload.ttl + action: + fields: payload.action + name: + fields: payload.name + resource_id: + fields: payload.id + created_at: + type: datetime + fields: payload.created_at + updated_at: + type: datetime + fields: payload.updated_at + version: + fields: payload.version + parent_domain_id: + fields: parent_domain_id + serial: + fields: payload.serial + - event_type: dns.domain.exists + traits: + <<: *dns_domain_traits + audit_period_beginning: + type: datetime + fields: payload.audit_period_beginning + audit_period_ending: + type: datetime + fields: payload.audit_period_ending + - event_type: trove.* + traits: 
&trove_base_traits + instance_type: + fields: payload.instance_type + user_id: + fields: payload.user_id + resource_id: + fields: payload.instance_id + instance_type_id: + fields: payload.instance_type_id + launched_at: + type: datetime + fields: payload.launched_at + instance_name: + fields: payload.instance_name + state: + fields: payload.state + nova_instance_id: + fields: payload.nova_instance_id + service_id: + fields: payload.service_id + created_at: + type: datetime + fields: payload.created_at + region: + fields: payload.region + - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] + traits: &trove_common_traits + name: + fields: payload.name + availability_zone: + fields: payload.availability_zone + instance_size: + type: int + fields: payload.instance_size + volume_size: + type: int + fields: payload.volume_size + nova_volume_id: + fields: payload.nova_volume_id + - event_type: trove.instance.create + traits: + <<: [*trove_base_traits, *trove_common_traits] + - event_type: trove.instance.modify_volume + traits: + <<: [*trove_base_traits, *trove_common_traits] + old_volume_size: + type: int + fields: payload.old_volume_size + modify_at: + type: datetime + fields: payload.modify_at + - event_type: trove.instance.modify_flavor + traits: + <<: [*trove_base_traits, *trove_common_traits] + old_instance_size: + type: int + fields: payload.old_instance_size + modify_at: + type: datetime + fields: payload.modify_at + - event_type: trove.instance.delete + traits: + <<: [*trove_base_traits, *trove_common_traits] + deleted_at: + type: datetime + fields: payload.deleted_at + - event_type: trove.instance.exists + traits: + <<: *trove_base_traits + display_name: + fields: payload.display_name + audit_period_beginning: + type: datetime + fields: payload.audit_period_beginning + audit_period_ending: + type: datetime + fields: payload.audit_period_ending + - event_type: profiler.* + traits: + project: + fields: payload.project + service: + fields: payload.service + name: + fields: payload.name + base_id: + fields: payload.base_id + trace_id: + fields: payload.trace_id + parent_id: + fields: payload.parent_id + timestamp: + type: datetime + fields: payload.timestamp + host: + fields: payload.info.host + path: + fields: payload.info.request.path + query: + fields: payload.info.request.query + method: + fields: payload.info.request.method + scheme: + fields: payload.info.request.scheme + db.statement: + fields: payload.info.db.statement + db.params: + fields: payload.info.db.params + - event_type: 'magnum.cluster.*' + traits: &magnum_cluster_crud + id: + fields: payload.id + typeURI: + fields: payload.typeURI + eventType: + fields: payload.eventType + eventTime: + type: datetime + fields: payload.eventTime + action: + fields: payload.action + outcome: + fields: payload.outcome + initiator_id: + fields: payload.initiator.id + initiator_typeURI: + fields: payload.initiator.typeURI + initiator_name: + fields: payload.initiator.name + initiator_host_agent: + fields: payload.initiator.host.agent + initiator_host_address: + fields: payload.initiator.host.address + target_id: + fields: payload.target.id + target_typeURI: + fields: payload.target.typeURI + observer_id: + fields: payload.observer.id + observer_typeURI: + fields: payload.observer.typeURI + - event_type: 'alarm.*' + traits: + id: + fields: payload.alarm_id + user_id: + fields: payload.user_id + project_id: + fields: payload.project_id + on_behalf_of: + fields: 
payload.on_behalf_of + severity: + fields: payload.severity + detail: + fields: payload.detail + type: + fields: payload.type + + gnocchi_resources: + archive_policy_default: ceilometer-low + archive_policies: + # NOTE(sileht): We keep "mean" for now to not break all gating that + # use the current tempest scenario. + - name: ceilometer-low + aggregation_methods: + - mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + - name: ceilometer-low-rate + aggregation_methods: + - mean + - rate:mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + - name: ceilometer-high + aggregation_methods: + - mean + back_window: 0 + definition: + - granularity: 1 second + timespan: 1 hour + - granularity: 1 minute + timespan: 1 day + - granularity: 1 hour + timespan: 365 days + - name: ceilometer-high-rate + aggregation_methods: + - mean + - rate:mean + back_window: 0 + definition: + - granularity: 1 second + timespan: 1 hour + - granularity: 1 minute + timespan: 1 day + - granularity: 1 hour + timespan: 365 days + + resources: + - resource_type: identity + metrics: + identity.authenticate.success: + identity.authenticate.pending: + identity.authenticate.failure: + identity.user.created: + identity.user.deleted: + identity.user.updated: + identity.group.created: + identity.group.deleted: + identity.group.updated: + identity.role.created: + identity.role.deleted: + identity.role.updated: + identity.project.created: + identity.project.deleted: + identity.project.updated: + identity.trust.created: + identity.trust.deleted: + identity.role_assignment.created: + identity.role_assignment.deleted: + + - resource_type: ceph_account + metrics: + radosgw.objects: + radosgw.objects.size: + radosgw.objects.containers: + radosgw.api.request: + radosgw.containers.objects: + radosgw.containers.objects.size: + + - resource_type: instance + metrics: + memory: + memory.usage: + memory.resident: + memory.swap.in: + memory.swap.out: + memory.bandwidth.total: + memory.bandwidth.local: + vcpus: + cpu: + archive_policy_name: ceilometer-low-rate + cpu_l3_cache: + disk.root.size: + disk.ephemeral.size: + disk.latency: + disk.iops: + disk.capacity: + disk.allocation: + disk.usage: + compute.instance.booting.time: + perf.cpu.cycles: + perf.instructions: + perf.cache.references: + perf.cache.misses: + attributes: + host: resource_metadata.(instance_host|host) + image_ref: resource_metadata.image_ref + launched_at: resource_metadata.launched_at + created_at: resource_metadata.created_at + deleted_at: resource_metadata.deleted_at + display_name: resource_metadata.display_name + flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) + flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) + server_group: resource_metadata.user_metadata.server_group + event_delete: compute.instance.delete.start + event_create: compute.instance.create.end + event_attributes: + id: instance_id + display_name: display_name + host: host + availability_zone: availability_zone + flavor_id: instance_type_id + flavor_name: instance_type + user_id: user_id + project_id: project_id + event_associated_resources: + instance_network_interface: '{"=": {"instance_id": "%s"}}' + instance_disk: '{"=": {"instance_id": "%s"}}' + + - resource_type: instance_network_interface + metrics: + network.outgoing.packets: + archive_policy_name: ceilometer-low-rate + network.incoming.packets: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.drop: + archive_policy_name: 
ceilometer-low-rate + network.incoming.packets.drop: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.error: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.error: + archive_policy_name: ceilometer-low-rate + network.outgoing.bytes: + archive_policy_name: ceilometer-low-rate + network.incoming.bytes: + archive_policy_name: ceilometer-low-rate + attributes: + name: resource_metadata.vnic_name + instance_id: resource_metadata.instance_id + + - resource_type: instance_disk + metrics: + disk.device.read.requests: + archive_policy_name: ceilometer-low-rate + disk.device.write.requests: + archive_policy_name: ceilometer-low-rate + disk.device.read.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.write.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.latency: + disk.device.read.latency: + disk.device.write.latency: + disk.device.iops: + disk.device.capacity: + disk.device.allocation: + disk.device.usage: + attributes: + name: resource_metadata.disk_name + instance_id: resource_metadata.instance_id + + - resource_type: image + metrics: + image.size: + image.download: + image.serve: + attributes: + name: resource_metadata.name + container_format: resource_metadata.container_format + disk_format: resource_metadata.disk_format + event_delete: image.delete + event_attributes: + id: resource_id + + - resource_type: ipmi + metrics: + hardware.ipmi.node.power: + hardware.ipmi.node.temperature: + hardware.ipmi.node.inlet_temperature: + hardware.ipmi.node.outlet_temperature: + hardware.ipmi.node.fan: + hardware.ipmi.node.current: + hardware.ipmi.node.voltage: + hardware.ipmi.node.airflow: + hardware.ipmi.node.cups: + hardware.ipmi.node.cpu_util: + hardware.ipmi.node.mem_util: + hardware.ipmi.node.io_util: + + - resource_type: ipmi_sensor + metrics: + - 'hardware.ipmi.power' + - 'hardware.ipmi.temperature' + - 'hardware.ipmi.current' + - 'hardware.ipmi.voltage' + attributes: + node: resource_metadata.node + + - resource_type: network + metrics: + bandwidth: + ip.floating: + event_delete: floatingip.delete.end + event_attributes: + id: resource_id + + - resource_type: stack + metrics: + stack.create: + stack.update: + stack.delete: + stack.resume: + stack.suspend: + + - resource_type: swift_account + metrics: + storage.objects.incoming.bytes: + storage.objects.outgoing.bytes: + storage.objects.size: + storage.objects: + storage.objects.containers: + storage.containers.objects: + storage.containers.objects.size: + + - resource_type: volume + metrics: + volume: + volume.size: + snapshot.size: + volume.snapshot.size: + volume.backup.size: + backup.size: + volume.manage_existing.start: + volume.manage_existing.end: + volume.manage_existing_snapshot.start: + volume.manage_existing_snapshot.end: + attributes: + display_name: resource_metadata.(display_name|name) + volume_type: resource_metadata.volume_type + image_id: resource_metadata.image_id + instance_id: resource_metadata.instance_id + event_delete: + - volume.delete.end + - snapshot.delete.end + event_update: + - volume.transfer.accept.end + - snapshot.transfer.accept.end + event_attributes: + id: resource_id + project_id: project_id + + - resource_type: volume_provider + metrics: + volume.provider.capacity.total: + volume.provider.capacity.free: + volume.provider.capacity.allocated: + volume.provider.capacity.provisioned: + volume.provider.capacity.virtual_free: + + - resource_type: volume_provider_pool + metrics: + volume.provider.pool.capacity.total: + 
volume.provider.pool.capacity.free: + volume.provider.pool.capacity.allocated: + volume.provider.pool.capacity.provisioned: + volume.provider.pool.capacity.virtual_free: + attributes: + provider: resource_metadata.provider + + - resource_type: host + metrics: + hardware.cpu.load.1min: + hardware.cpu.load.5min: + hardware.cpu.load.15min: + hardware.cpu.util: + hardware.cpu.user: + archive_policy_name: ceilometer-low-rate + hardware.cpu.nice: + archive_policy_name: ceilometer-low-rate + hardware.cpu.system: + archive_policy_name: ceilometer-low-rate + hardware.cpu.idle: + archive_policy_name: ceilometer-low-rate + hardware.cpu.wait: + archive_policy_name: ceilometer-low-rate + hardware.cpu.kernel: + archive_policy_name: ceilometer-low-rate + hardware.cpu.interrupt: + archive_policy_name: ceilometer-low-rate + hardware.memory.total: + hardware.memory.used: + hardware.memory.swap.total: + hardware.memory.swap.avail: + hardware.memory.buffer: + hardware.memory.cached: + hardware.network.ip.outgoing.datagrams: + hardware.network.ip.incoming.datagrams: + hardware.system_stats.cpu.idle: + hardware.system_stats.io.outgoing.blocks: + hardware.system_stats.io.incoming.blocks: + attributes: + host_name: resource_metadata.resource_url + + - resource_type: host_disk + metrics: + hardware.disk.size.total: + hardware.disk.size.used: + hardware.disk.read.bytes: + hardware.disk.write.bytes: + hardware.disk.read.requests: + hardware.disk.write.requests: + attributes: + host_name: resource_metadata.resource_url + device_name: resource_metadata.device + + - resource_type: host_network_interface + metrics: + hardware.network.incoming.bytes: + hardware.network.outgoing.bytes: + hardware.network.outgoing.errors: + attributes: + host_name: resource_metadata.resource_url + device_name: resource_metadata.name + + - resource_type: nova_compute + metrics: + compute.node.cpu.frequency: + compute.node.cpu.idle.percent: + compute.node.cpu.idle.time: + compute.node.cpu.iowait.percent: + compute.node.cpu.iowait.time: + compute.node.cpu.kernel.percent: + compute.node.cpu.kernel.time: + compute.node.cpu.percent: + compute.node.cpu.user.percent: + compute.node.cpu.user.time: + attributes: + host_name: resource_metadata.host + + - resource_type: manila_share + metrics: + manila.share.size: + attributes: + name: resource_metadata.name + host: resource_metadata.host + status: resource_metadata.status + availability_zone: resource_metadata.availability_zone + protocol: resource_metadata.protocol + + - resource_type: switch + metrics: + switch: + switch.ports: + attributes: + controller: resource_metadata.controller + + - resource_type: switch_port + metrics: + switch.port: + switch.port.uptime: + switch.port.receive.packets: + switch.port.transmit.packets: + switch.port.receive.bytes: + switch.port.transmit.bytes: + switch.port.receive.drops: + switch.port.transmit.drops: + switch.port.receive.errors: + switch.port.transmit.errors: + switch.port.receive.frame_error: + switch.port.receive.overrun_error: + switch.port.receive.crc_error: + switch.port.collision.count: + attributes: + switch: resource_metadata.switch + port_number_on_switch: resource_metadata.port_number_on_switch + neutron_port_id: resource_metadata.neutron_port_id + controller: resource_metadata.controller + + - resource_type: port + metrics: + port: + port.uptime: + port.receive.packets: + port.transmit.packets: + port.receive.bytes: + port.transmit.bytes: + port.receive.drops: + port.receive.errors: + attributes: + controller: resource_metadata.controller + + - 
resource_type: switch_table + metrics: + switch.table.active.entries: + attributes: + controller: resource_metadata.controller + switch: resource_metadata.switch + + - resource_type: loadbalancer + metrics: + network.services.lb.outgoing.bytes: + network.services.lb.incoming.bytes: + network.services.lb.pool: + network.services.lb.listener: + network.services.lb.member: + network.services.lb.health_monitor: + network.services.lb.loadbalancer: + network.services.lb.total.connections: + network.services.lb.active.connections: + meters: + metric: + # Image + - name: "image.size" + event_type: + - "image.upload" + - "image.delete" + - "image.update" + type: "gauge" + unit: B + volume: $.payload.size + resource_id: $.payload.id + project_id: $.payload.owner + + - name: "image.download" + event_type: "image.send" + type: "delta" + unit: "B" + volume: $.payload.bytes_sent + resource_id: $.payload.image_id + user_id: $.payload.receiver_user_id + project_id: $.payload.receiver_tenant_id + + - name: "image.serve" + event_type: "image.send" + type: "delta" + unit: "B" + volume: $.payload.bytes_sent + resource_id: $.payload.image_id + project_id: $.payload.owner_id + + - name: 'volume.provider.capacity.total' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.total + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.free' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.free + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.allocated' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.allocated + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.provisioned' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.provisioned + resource_id: $.payload.name_to_id + + - name: 'volume.provider.capacity.virtual_free' + event_type: 'capacity.backend.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.virtual_free + resource_id: $.payload.name_to_id + + - name: 'volume.provider.pool.capacity.total' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.total + resource_id: $.payload.name_to_id + metadata: &provider_pool_meta + provider: $.payload.name_to_id.`split(#, 0, 1)` + + - name: 'volume.provider.pool.capacity.free' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.free + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.provider.pool.capacity.allocated' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.allocated + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.provider.pool.capacity.provisioned' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.provisioned + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.provider.pool.capacity.virtual_free' + event_type: 'capacity.pool.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.virtual_free + resource_id: $.payload.name_to_id + metadata: + <<: *provider_pool_meta + + - name: 'volume.size' + event_type: + - 'volume.exists' + - 'volume.retype' + - 'volume.create.*' + - 'volume.delete.*' + - 'volume.resize.*' + - 'volume.attach.*' + - 'volume.detach.*' + - 'volume.update.*' + - 'volume.manage.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + 
resource_id: $.payload.volume_id + metadata: + display_name: $.payload.display_name + volume_type: $.payload.volume_type + image_id: $.payload.glance_metadata[?key=image_id].value + instance_id: $.payload.volume_attachment[0].server_id + + - name: 'snapshot.size' + event_type: + - 'snapshot.exists' + - 'snapshot.create.*' + - 'snapshot.delete.*' + - 'snapshot.manage.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.volume_size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.snapshot_id + metadata: + display_name: $.payload.display_name + + - name: 'backup.size' + event_type: + - 'backup.exists' + - 'backup.create.*' + - 'backup.delete.*' + - 'backup.restore.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.backup_id + metadata: + display_name: $.payload.display_name + + # Magnum + - name: $.payload.metrics.[*].name + event_type: 'magnum.bay.metrics.*' + type: 'gauge' + unit: $.payload.metrics.[*].unit + volume: $.payload.metrics.[*].value + user_id: $.payload.user_id + project_id: $.payload.project_id + resource_id: $.payload.resource_id + lookup: ['name', 'unit', 'volume'] + + # Swift + - name: $.payload.measurements.[*].metric.[*].name + event_type: 'objectstore.http.request' + type: 'delta' + unit: $.payload.measurements.[*].metric.[*].unit + volume: $.payload.measurements.[*].result + resource_id: $.payload.target.id + user_id: $.payload.initiator.id + project_id: $.payload.initiator.project_id + lookup: ['name', 'unit', 'volume'] + + - name: 'memory' + event_type: &instance_events compute.instance.(?!create.start|update).* + type: 'gauge' + unit: 'MB' + volume: $.payload.memory_mb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: &instance_meta + host: $.payload.host + flavor_id: $.payload.instance_flavor_id + flavor_name: $.payload.instance_type + display_name: $.payload.display_name + image_ref: $.payload.image_meta.base_image_ref + launched_at: $.payload.launched_at + created_at: $.payload.created_at + deleted_at: $.payload.deleted_at + + - name: 'vcpus' + event_type: *instance_events + type: 'gauge' + unit: 'vcpu' + volume: $.payload.vcpus + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'compute.instance.booting.time' + event_type: 'compute.instance.create.end' + type: 'gauge' + unit: 'sec' + volume: + fields: [$.payload.created_at, $.payload.launched_at] + plugin: 'timedelta' + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'disk.root.size' + event_type: *instance_events + type: 'gauge' + unit: 'GB' + volume: $.payload.root_gb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'disk.ephemeral.size' + event_type: *instance_events + type: 'gauge' + unit: 'GB' + volume: $.payload.ephemeral_gb + user_id: $.payload.user_id + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_metadata: $.payload.metadata + metadata: + <<: *instance_meta + + - name: 'bandwidth' + event_type: 'l3.meter' + type: 'delta' + unit: 'B' + volume: $.payload.bytes + project_id: $.payload.tenant_id 
+ resource_id: $.payload.label_id + + - name: 'compute.node.cpu.frequency' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'MHz' + volume: $.payload.metrics[?(@.name='cpu.frequency')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.frequency')].source + + - name: 'compute.node.cpu.user.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.user.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.user.time')].source + + - name: 'compute.node.cpu.kernel.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.kernel.time')].source + + - name: 'compute.node.cpu.idle.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.idle.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.idle.time')].source + + - name: 'compute.node.cpu.iowait.time' + event_type: 'compute.metrics.update' + type: 'cumulative' + unit: 'ns' + volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.iowait.time')].source + + - name: 'compute.node.cpu.kernel.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source + + - name: 'compute.node.cpu.idle.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.idle.percent')].source + + - name: 'compute.node.cpu.user.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.user.percent')].source + + - name: 'compute.node.cpu.iowait.percent' + 
event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source + + - name: 'compute.node.cpu.percent' + event_type: 'compute.metrics.update' + type: 'gauge' + unit: 'percent' + volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 + resource_id: $.payload.host + "_" + $.payload.nodename + timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp + metadata: + event_type: $.event_type + host: $.publisher_id + source: $.payload.metrics[?(@.name='cpu.percent')].source + + # Identity + # NOTE(gordc): hack because jsonpath-rw-ext can't concat starting with string. + - name: $.payload.outcome - $.payload.outcome + 'identity.authenticate.' + $.payload.outcome + type: 'delta' + unit: 'user' + volume: 1 + event_type: + - 'identity.authenticate' + resource_id: $.payload.initiator.id + user_id: $.payload.initiator.id + + # DNS + - name: 'dns.domain.exists' + event_type: 'dns.domain.exists' + type: 'cumulative' + unit: 's' + volume: + fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] + plugin: 'timedelta' + project_id: $.payload.tenant_id + resource_id: $.payload.id + user_id: $.ctxt.user + metadata: + status: $.payload.status + pool_id: $.payload.pool_id + host: $.publisher_id + + # Trove + - name: 'trove.instance.exists' + event_type: 'trove.instance.exists' + type: 'cumulative' + unit: 's' + volume: + fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] + plugin: 'timedelta' + project_id: $.payload.tenant_id + resource_id: $.payload.instance_id + user_id: $.payload.user_id + metadata: + nova_instance_id: $.payload.nova_instance_id + state: $.payload.state + service_id: $.payload.service_id + instance_type: $.payload.instance_type + instance_type_id: $.payload.instance_type_id + + # Manila + - name: 'manila.share.size' + event_type: + - 'share.create.*' + - 'share.delete.*' + - 'share.extend.*' + - 'share.shrink.*' + type: 'gauge' + unit: 'GB' + volume: $.payload.size + user_id: $.payload.user_id + project_id: $.payload.project_id + resource_id: $.payload.share_id + metadata: + name: $.payload.name + host: $.payload.host + status: $.payload.status + availability_zone: $.payload.availability_zone + protocol: $.payload.proto + + polling: + sources: + - name: all_pollsters + interval: 300 + meters: + - "*" + pipeline: + sources: + - name: meter_source + meters: + - "*" + sinks: + - meter_sink + sinks: + - name: meter_sink + publishers: + - gnocchi + policy: {} + audit_api_map: + DEFAULT: + target_endpoint_type: None + path_keywords: + meters: meter_name + resources: resource_id + statistics: None + samples: sample_id + service_endpoints: + metering: service/metering + rally_tests: + CeilometerStats.create_meter_and_get_stats: + - args: + user_id: user-id + resource_id: resource-id + counter_volume: 1 + counter_unit: '' + counter_type: cumulative + runner: + type: constant + times: 1 + concurrency: 1 + sla: + failure_rate: + max: 0 + CeilometerMeters.list_meters: + - runner: + type: constant + times: 1 + concurrency: 1 + sla: + failure_rate: + max: 0 + context: + ceilometer: + counter_name: benchmark_meter + counter_type: gauge + counter_unit: "%" + counter_volume: 1 + resources_per_tenant: 1 + 
samples_per_resource: 1 + timestamp_interval: 10 + metadata_list: + - status: active + name: rally benchmark on + deleted: 'false' + - status: terminated + name: rally benchmark off + deleted: 'true' + args: + limit: 5 + metadata_query: + status: terminated + CeilometerQueries.create_and_query_samples: + - args: + filter: + "=": + counter_unit: instance + orderby: + limit: 10 + counter_name: cpu_util + counter_type: gauge + counter_unit: instance + counter_volume: 1 + resource_id: resource_id + runner: + type: constant + times: 1 + concurrency: 1 + sla: + failure_rate: + max: 0 + +dependencies: + dynamic: + common: + local_image_registry: + jobs: + - ceilometer-image-repo-sync + services: + - endpoint: node + service: local_image_registry + static: + central: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + ipmi: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + compute: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + db_sync: + jobs: [] + services: [] + ks_service: + services: + - endpoint: internal + service: identity + ks_user: + services: + - endpoint: internal + service: identity + rabbit_init: + services: + - service: oslo_messaging + endpoint: internal + notification: + jobs: + - ceilometer-db-sync + - ceilometer-rabbit-init + - ceilometer-ks-user + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metric + tests: + services: + - endpoint: internal + service: identity + - endpoint: internal + service: metering + - endpoint: internal + service: metric + image_repo_sync: + services: + - endpoint: internal + service: local_image_registry + +# Names of secrets used by bootstrap and environmental checks +secrets: + identity: + admin: ceilometer-keystone-admin + ceilometer: ceilometer-keystone-user + test: ceilometer-keystone-test + oslo_messaging: + admin: ceilometer-rabbitmq-admin + ceilometer: ceilometer-rabbitmq-user + oci_image_registry: + ceilometer: ceilometer-oci-image-registry + +bootstrap: + enabled: false + ks_user: ceilometer + script: | + openstack token issue + +# typically overridden by environmental +# values, but should include all endpoints +# required by this chart +endpoints: + cluster_domain_suffix: cluster.local + local_image_registry: + name: docker-registry + namespace: docker-registry + hosts: + default: localhost + internal: docker-registry + node: localhost + host_fqdn_override: + default: null + port: + registry: + node: 5000 + oci_image_registry: + name: oci-image-registry + namespace: oci-image-registry + auth: + enabled: false + ceilometer: + username: ceilometer + password: password + hosts: + default: localhost + host_fqdn_override: + default: null + port: + registry: + default: null + identity: + name: keystone + auth: + admin: + region_name: RegionOne + username: admin + password: password + project_name: admin + user_domain_name: default + project_domain_name: default + ceilometer: + role: admin + region_name: RegionOne + username: ceilometer + password: password + project_name: service + user_domain_name: service + project_domain_name: 
service + test: + role: admin + region_name: RegionOne + username: ceilometer-test + password: password + project_name: test + user_domain_name: service + project_domain_name: service + hosts: + default: keystone + internal: keystone-api + host_fqdn_override: + default: null + path: + default: /v3 + scheme: + default: 'http' + port: + api: + default: 5000 + public: 80 + internal: 5000 + service: 5000 + metric: + name: gnocchi + hosts: + default: gnocchi-api + public: gnocchi + host_fqdn_override: + default: null + path: + default: null + scheme: + default: 'http' + port: + api: + default: 8041 + public: 80 + internal: 8041 + service: 8041 + alarming: + name: aodh + hosts: + default: aodh-api + public: aodh + host_fqdn_override: + default: null + path: + default: null + scheme: + default: 'http' + port: + api: + default: 8042 + public: 80 + oslo_cache: + auth: + # NOTE(portdirect): this is used to define the value for keystone + # authtoken cache encryption key, if not set it will be populated + # automatically with a random value, but to take advantage of + # this feature all services should be set to use the same key, + # and memcache service. + memcache_secret_key: null + hosts: + default: memcached + host_fqdn_override: + default: null + port: + memcache: + default: 11211 + oslo_messaging: + auth: + admin: + username: rabbitmq + password: password + ceilometer: + username: ceilometer + password: password + statefulset: + replicas: 2 + name: rabbitmq-rabbitmq + hosts: + default: rabbitmq + host_fqdn_override: + default: null + path: /ceilometer + scheme: rabbit + port: + amqp: + default: 5672 + http: + default: 15672 + +pod: + affinity: + anti: + type: + default: preferredDuringSchedulingIgnoredDuringExecution + topologyKey: + default: kubernetes.io/hostname + weight: + default: 10 + tolerations: + ceilometer: + enabled: false + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + mounts: + ceilometer_tests: + init_container: null + ceilometer_tests: + volumeMounts: + volumes: + ceilometer_compute: + init_container: null + ceilometer_compute: + volumeMounts: + volumes: + ceilometer_central: + init_container: null + ceilometer_central: + volumeMounts: + volumes: + ceilometer_ipmi: + init_container: null + ceilometer_ipmi: + volumeMounts: + volumes: + ceilometer_notification: + init_container: null + ceilometer_notification: + volumeMounts: + volumes: + ceilometer_db_sync: + ceilometer_db_sync: + volumeMounts: + volumes: + replicas: + central: 1 + notification: 1 + lifecycle: + upgrades: + deployments: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + daemonsets: + pod_replacement_strategy: RollingUpdate + compute: + enabled: true + min_ready_seconds: 0 + max_unavailable: 1 + resources: + enabled: false + compute: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + notification: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + central: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ipmi: + requests: + memory: "124Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + jobs: + db_sync: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + rabbit_init: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: 
"2000m" + ks_service: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_user: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + tests: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + image_repo_sync: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + +network_policy: + ceilometer: + ingress: + - {} + egress: + - {} + +manifests: + configmap_bin: true + configmap_etc: true + deployment_api: false + deployment_central: true + deployment_collector: false + daemonset_compute: true + daemonset_ipmi: false + deployment_notification: true + ingress_api: false + job_bootstrap: true + job_db_drop: false + # using gnocchi so no db init + job_db_init: false + job_db_init_mongodb: false + # runs ceilometer-upgrade which inits resource types in gnocchi! + job_db_sync: true + job_image_repo_sync: true + job_ks_endpoints: false + job_ks_service: true + job_ks_user: true + job_rabbit_init: true + pdb_api: true + pod_rally_test: true + network_policy: false + secret_db: true + secret_keystone: true + secret_mongodb: false + secret_rabbitmq: true + secret_registry: true + service_api: true + service_ingress_api: true +... diff --git a/helm-configs/cinder/cinder-helm-overrides.yaml b/helm-configs/cinder/cinder-helm-overrides.yaml index 9d15d034..136fa62c 100644 --- a/helm-configs/cinder/cinder-helm-overrides.yaml +++ b/helm-configs/cinder/cinder-helm-overrides.yaml @@ -1320,7 +1320,7 @@ endpoints: username: cinder password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /cinder diff --git a/helm-configs/glance/glance-helm-overrides.yaml b/helm-configs/glance/glance-helm-overrides.yaml index b3b19a86..6a5ad87c 100644 --- a/helm-configs/glance/glance-helm-overrides.yaml +++ b/helm-configs/glance/glance-helm-overrides.yaml @@ -589,7 +589,7 @@ endpoints: username: glance password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /glance diff --git a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml index 7ade5b93..9110f4e9 100644 --- a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml +++ b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml @@ -234,7 +234,7 @@ pod: init_container: null gnocchi_tests: replicas: - api: 1 + api: 3 lifecycle: upgrades: deployments: @@ -246,11 +246,11 @@ pod: daemonsets: pod_replacement_strategy: RollingUpdate metricd: - enabled: false + enabled: true min_ready_seconds: 0 max_unavailable: 1 statsd: - enabled: false + enabled: true min_ready_seconds: 0 max_unavailable: 1 disruption_budget: @@ -622,7 +622,7 @@ endpoints: username: gnocchi password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /gnocchi diff --git a/helm-configs/heat/heat-helm-overrides.yaml b/helm-configs/heat/heat-helm-overrides.yaml index b27640c1..138eb76e 100644 --- a/helm-configs/heat/heat-helm-overrides.yaml +++ b/helm-configs/heat/heat-helm-overrides.yaml @@ -859,7 +859,7 @@ endpoints: username: heat password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /heat diff --git a/helm-configs/horizon/horizon-helm-overrides.yaml b/helm-configs/horizon/horizon-helm-overrides.yaml index 298f8238..4563074f 100644 --- 
a/helm-configs/horizon/horizon-helm-overrides.yaml +++ b/helm-configs/horizon/horizon-helm-overrides.yaml @@ -7242,7 +7242,7 @@ endpoints: username: horizon password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /horizon diff --git a/helm-configs/keystone/keystone-helm-overrides.yaml b/helm-configs/keystone/keystone-helm-overrides.yaml index 09667ed0..cb6f0481 100644 --- a/helm-configs/keystone/keystone-helm-overrides.yaml +++ b/helm-configs/keystone/keystone-helm-overrides.yaml @@ -972,7 +972,7 @@ endpoints: username: keystone password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /keystone diff --git a/helm-configs/loki/loki-helm-minio-overrides-example.yaml b/helm-configs/loki/loki-helm-minio-overrides-example.yaml index c77ada3a..83d76f72 100644 --- a/helm-configs/loki/loki-helm-minio-overrides-example.yaml +++ b/helm-configs/loki/loki-helm-minio-overrides-example.yaml @@ -5,3 +5,5 @@ minio: loki: auth_enabled: false configStorageType: Secret + ingester: + autoforget_unhealthy: true diff --git a/helm-configs/loki/loki-helm-s3-overrides-example.yaml b/helm-configs/loki/loki-helm-s3-overrides-example.yaml index 09730acf..e9a95086 100644 --- a/helm-configs/loki/loki-helm-s3-overrides-example.yaml +++ b/helm-configs/loki/loki-helm-s3-overrides-example.yaml @@ -5,6 +5,8 @@ minio: loki: auth_enabled: false configStorageType: Secret + ingester: + autoforget_unhealthy: true storage: bucketNames: chunks: < CHUNKS BUCKET NAME > # TODO: Update with relevant bucket name for chunks diff --git a/helm-configs/loki/loki-helm-swift-overrides-example.yaml b/helm-configs/loki/loki-helm-swift-overrides-example.yaml index 5e4155be..a28ae500 100644 --- a/helm-configs/loki/loki-helm-swift-overrides-example.yaml +++ b/helm-configs/loki/loki-helm-swift-overrides-example.yaml @@ -5,6 +5,8 @@ minio: loki: auth_enabled: false configStorageType: Secret + ingester: + autoforget_unhealthy: true storage: bucketNames: chunks: chunks diff --git a/helm-configs/neutron/neutron-helm-overrides.yaml b/helm-configs/neutron/neutron-helm-overrides.yaml index ac8f036f..2e9dbdd0 100644 --- a/helm-configs/neutron/neutron-helm-overrides.yaml +++ b/helm-configs/neutron/neutron-helm-overrides.yaml @@ -2199,7 +2199,7 @@ endpoints: username: neutron password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /neutron diff --git a/helm-configs/nginx-gateway-fabric/helm-overrides.yaml b/helm-configs/nginx-gateway-fabric/helm-overrides.yaml new file mode 100644 index 00000000..87b62ae7 --- /dev/null +++ b/helm-configs/nginx-gateway-fabric/helm-overrides.yaml @@ -0,0 +1,117 @@ +nginxGateway: + ## The kind of the NGINX Gateway Fabric installation - currently, only deployment is supported. + kind: deployment + ## gatewayClassName is the name of the GatewayClass that will be created as part of this release. Every NGINX Gateway + ## Fabric must have a unique corresponding GatewayClass resource. NGINX Gateway Fabric only processes resources that + ## belong to its class - i.e. have the "gatewayClassName" field resource equal to the class. + gatewayClassName: nginx + ## The name of the Gateway controller. The controller name must be of the form: DOMAIN/PATH. The controller's domain + ## is gateway.nginx.org. 
+ gatewayControllerName: gateway.nginx.org/nginx-gateway-controller + ## The dynamic configuration for the control plane that is contained in the NginxGateway resource. + config: + logging: + ## Log level. Supported values "info", "debug", "error". + level: info + ## The number of replicas of the NGINX Gateway Fabric Deployment. + replicaCount: 1 + ## The configuration for leader election. + leaderElection: + ## Enable leader election. Leader election is used to avoid multiple replicas of the NGINX Gateway Fabric + ## reporting the status of the Gateway API resources. If not enabled, all replicas of NGINX Gateway Fabric + ## will update the statuses of the Gateway API resources. + enable: true + ## The name of the leader election lock. A Lease object with this name will be created in the same Namespace as + ## the controller. Autogenerated if not set or set to "". + lockName: "" + + ## Defines the settings for the control plane readiness probe. This probe returns Ready when the controller + ## has started and configured NGINX to serve traffic. + readinessProbe: + ## Enable the /readyz endpoint on the control plane. + enable: true + ## Port in which the readiness endpoint is exposed. + port: 8081 + ## The number of seconds after the Pod has started before the readiness probes are initiated. + initialDelaySeconds: 3 + + image: + ## The NGINX Gateway Fabric image to use + repository: ghcr.io/nginxinc/nginx-gateway-fabric + tag: 1.1.0 + pullPolicy: IfNotPresent + + securityContext: + ## Some environments may need this set to true in order for the control plane to successfully reload NGINX. + allowPrivilegeEscalation: false + + ## The lifecycle of the nginx-gateway container. + lifecycle: {} + + ## extraVolumeMounts are the additional volume mounts for the nginx-gateway container. + extraVolumeMounts: [] + +nginx: + ## The NGINX image to use + image: + repository: ghcr.io/nginxinc/nginx-gateway-fabric/nginx + tag: 1.1.0 + pullPolicy: IfNotPresent + + ## The lifecycle of the nginx container. + lifecycle: {} + + ## extraVolumeMounts are the additional volume mounts for the nginx container. + extraVolumeMounts: [] + +## The termination grace period of the NGINX Gateway Fabric pod. +terminationGracePeriodSeconds: 30 + +## Tolerations for the NGINX Gateway Fabric pod. +tolerations: [] + +## The affinity of the NGINX Gateway Fabric pod. +affinity: {} + +serviceAccount: + annotations: {} + ## The name of the service account of the NGINX Gateway Fabric pods. Used for RBAC. + ## Autogenerated if not set or set to "". + # name: nginx-gateway + +service: + ## Creates a service to expose the NGINX Gateway Fabric pods. + create: true + ## The type of service to create for the NGINX Gateway Fabric. + type: LoadBalancer + ## The externalTrafficPolicy of the service. The value Local preserves the client source IP. + externalTrafficPolicy: Local + ## The annotations of the NGINX Gateway Fabric service. + annotations: + "metallb.universe.tf/address-pool": "openstack-external" + "metallb.universe.tf/allow-shared-ip": "openstack-external-svc" + + ## A list of ports to expose through the NGINX Gateway Fabric service. Update it to match the listener ports from + ## your Gateway resource. Follows the conventional Kubernetes yaml syntax for service ports. + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + +metrics: + ## Enable exposing metrics in the Prometheus format. 
+ enable: true + ## Set the port where the Prometheus metrics are exposed. Format: [1024 - 65535] + port: 9113 + ## Enable serving metrics via https. By default metrics are served via http. + ## Please note that this endpoint will be secured with a self-signed certificate. + secure: false + +## extraVolumes for the NGINX Gateway Fabric pod. Use in conjunction with +## nginxGateway.extraVolumeMounts and nginx.extraVolumeMounts to mount additional volumes to the containers. +extraVolumes: [] diff --git a/helm-configs/nova/nova-helm-overrides.yaml b/helm-configs/nova/nova-helm-overrides.yaml index d6923c6c..75e07d39 100644 --- a/helm-configs/nova/nova-helm-overrides.yaml +++ b/helm-configs/nova/nova-helm-overrides.yaml @@ -1375,7 +1375,7 @@ conf: default_ephemeral_format: ext4 ram_allocation_ratio: 1.0 disk_allocation_ratio: 1.0 - cpu_allocation_ratio: 3.0 + cpu_allocation_ratio: 8.0 state_path: /var/lib/nova osapi_compute_listen: 0.0.0.0 # NOTE(portdirect): the bind port should not be defined, and is manipulated @@ -1640,7 +1640,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /nova @@ -1657,7 +1657,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /nova_api @@ -1674,7 +1674,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /nova_cell0 diff --git a/helm-configs/octavia/octavia-helm-overrides.yaml b/helm-configs/octavia/octavia-helm-overrides.yaml index 1a30a9e2..2865d4c9 100644 --- a/helm-configs/octavia/octavia-helm-overrides.yaml +++ b/helm-configs/octavia/octavia-helm-overrides.yaml @@ -466,7 +466,7 @@ endpoints: username: octavia password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /octavia diff --git a/helm-configs/placement/placement-helm-overrides.yaml b/helm-configs/placement/placement-helm-overrides.yaml index 9d85dd6e..f6a2cc8c 100644 --- a/helm-configs/placement/placement-helm-overrides.yaml +++ b/helm-configs/placement/placement-helm-overrides.yaml @@ -206,7 +206,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: maxscale-galera host_fqdn_override: default: null path: /placement diff --git a/helm-configs/postgresql/postgresql-helm-overrides.yaml b/helm-configs/postgresql/postgresql-helm-overrides.yaml index 798aad04..ad41ea06 100644 --- a/helm-configs/postgresql/postgresql-helm-overrides.yaml +++ b/helm-configs/postgresql/postgresql-helm-overrides.yaml @@ -224,9 +224,9 @@ dependencies: monitoring: prometheus: - enabled: true + enabled: false postgresql_exporter: - scrape: true + scrape: false volume: backup: @@ -239,7 +239,7 @@ jobs: # activeDeadlineSeconds == 0 means no deadline activeDeadlineSeconds: 0 backoffLimit: 6 - cron: "0 0 * * *" + cron: "15 0 * * *" history: success: 3 failed: 1 @@ -300,12 +300,12 @@ conf: hba_file: '/tmp/pg_hba.conf' ident_file: '/tmp/pg_ident.conf' backup: - enabled: false + enabled: true base_path: /var/backup days_to_keep: 3 pg_dumpall_options: '--inserts --clean' remote_backup: - enabled: false + enabled: true container_name: postgresql days_to_keep: 14 storage_policy: default-placement @@ -466,7 +466,7 @@ manifests: configmap_etc: true job_image_repo_sync: true network_policy: false - job_ks_user: false + 
job_ks_user: true secret_admin: true secret_etc: true secret_audit: true @@ -474,14 +474,14 @@ manifests: secret_registry: true service: true statefulset: true - cron_job_postgresql_backup: false - pvc_backup: false + cron_job_postgresql_backup: true + pvc_backup: true monitoring: prometheus: - configmap_bin: true - configmap_etc: true - deployment_exporter: true - job_user_create: true - secret_etc: true - service_exporter: true + configmap_bin: false + configmap_etc: false + deployment_exporter: false + job_user_create: false + secret_etc: false + service_exporter: false ... diff --git a/kustomize/cinder/base/cinder-mariadb-database.yaml b/kustomize/cinder/base/cinder-mariadb-database.yaml index ae676839..94076c21 100644 --- a/kustomize/cinder/base/cinder-mariadb-database.yaml +++ b/kustomize/cinder/base/cinder-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: cinder namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: cinder namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: cinder-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/cinder/base/cinder-rabbitmq-queue.yaml b/kustomize/cinder/base/cinder-rabbitmq-queue.yaml index e72ce2ea..b4e3b4bd 100644 --- a/kustomize/cinder/base/cinder-rabbitmq-queue.yaml +++ b/kustomize/cinder/base/cinder-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: cinder namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: cinder-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "cinder" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: cinder-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: cinder-qq # name of the queue vhost: "cinder" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: cinder-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "cinder" # name of a vhost userReference: diff --git a/kustomize/glance/base/glance-mariadb-database.yaml b/kustomize/glance/base/glance-mariadb-database.yaml index ce92a42c..5f3f540b 100644 --- a/kustomize/glance/base/glance-mariadb-database.yaml +++ b/kustomize/glance/base/glance-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: glance namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: glance namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: glance-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera 
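
Two patterns repeat throughout the hunks above and below: the OpenStack charts' database host is repointed from the Galera primary service to the new `maxscale-galera` service, and the mariadb-operator and RabbitMQ custom resources gain a `helm.sh/resource-policy: keep` annotation so that a Helm-driven uninstall, upgrade, or rollback skips deleting them (and the databases, users, grants, vhosts, and queues behind them). The sketch below is illustrative only: it assumes the openstack-helm convention that the host entries shown in these hunks live under `endpoints.oslo_db`, and the Database manifest simply mirrors the glance resource in the hunk above.

``` yaml
# Illustrative sketch, not taken verbatim from the patch.
# Assumption: in openstack-helm style charts the hunks above sit under
# endpoints.oslo_db, so changing the default host routes SQL traffic through
# the MaxScale listener instead of the mariadb-galera-primary service.
endpoints:
  oslo_db:
    hosts:
      default: maxscale-galera
---
# Mirrors the glance Database resource above. helm.sh/resource-policy: keep
# tells Helm to leave this custom resource (and therefore the backing
# database) in place when the release that owns it is uninstalled, upgraded,
# or rolled back.
apiVersion: k8s.mariadb.com/v1alpha1
kind: Database
metadata:
  name: glance
  namespace: openstack
  annotations:
    helm.sh/resource-policy: keep
spec:
  mariaDbRef:
    name: mariadb-galera
```

The same annotation is applied to the User, Grant, Vhost, Queue, and Permission resources in the surrounding hunks, so the whole data-bearing set survives a release removal rather than only the Database objects.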
diff --git a/kustomize/glance/base/glance-rabbitmq-queue.yaml b/kustomize/glance/base/glance-rabbitmq-queue.yaml index 7ced8174..ec1aa1ca 100644 --- a/kustomize/glance/base/glance-rabbitmq-queue.yaml +++ b/kustomize/glance/base/glance-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: glance namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: glance-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "glance" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: glance-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: glance-qq # name of the queue vhost: "glance" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: glance-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "glance" # name of a vhost userReference: diff --git a/kustomize/grafana/base/azure-client-secret.yaml b/kustomize/grafana/base/azure-client-secret.yaml new file mode 100644 index 00000000..e707a00c --- /dev/null +++ b/kustomize/grafana/base/azure-client-secret.yaml @@ -0,0 +1,9 @@ +apiversion: v1 +data: + client_id: base64_encoded_client_id + client_secret: base64_encoded_client_secret +kind: secret +metadata: + name: azure-client + namespace: grafana +type: opaque diff --git a/kustomize/grafana/base/datasources.yaml b/kustomize/grafana/base/datasources.yaml new file mode 100644 index 00000000..6ae7e3a3 --- /dev/null +++ b/kustomize/grafana/base/datasources.yaml @@ -0,0 +1,14 @@ +datasources: + datasources.yaml: + apiversion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + url: http://kube-prometheus-stack-prometheus.prometheus.svc.cluster.local:9090 + isdefault: true + - name: loki + type: loki + access: proxy + url: http://loki-gateway.{{ $.Release.Namespace }}.svc.cluster.local:80 + editable: false diff --git a/kustomize/grafana/base/example-cert.pem b/kustomize/grafana/base/example-cert.pem new file mode 100644 index 00000000..90e2af6b --- /dev/null +++ b/kustomize/grafana/base/example-cert.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0DCCARIGAWIBAGIBATANBGKQHKIG9W0BAQUFADB/MQSWCQYDVQQGEWJGUJET +MBEGA1UECAWKU29TZS1TDGF0ZTEOMAWGA1UEBWWFUGFYAXMXDTALBGNVBAOMBERP +BWKXDTALBGNVBASMBE5TQLUXEDAOBGNVBAMMB0RPBWKGQ0EXGZAZBGKQHKIG9W0B +CQEWDGRPBWLAZGLTAS5MCJAEFW0XNDAXMJGYMDM2NTVAFW0YNDAXMJYYMDM2NTVA +MFSXCZAJBGNVBAYTAKZSMRMWEQYDVQQIDAPTB21LLVN0YXRLMSEWHWYDVQQKDBHJ +BNRLCM5LDCBXAWRNAXRZIFB0ESBMDGQXFDASBGNVBAMMC3D3DY5KAW1PLMZYMIIB +IJANBGKQHKIG9W0BAQEFAAOCAQ8AMIIBCGKCAQEAVPNAPKLIKDVX98KW68LZ8PGA +RRCYERSNGQPJPIFMVJJE8LUCOXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWX +SXITRW99HBFAL1MDQYWCUKOEB9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P +1NCVW+6B/AAN9L1G2PQXGRDYC/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYB +AKJQETWWV6DFK/GRDOSED/6BW+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAU +ZKCHSRYC/WHVURX6O85D6QPZYWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWID +AQABO3SWETAJBGNVHRMEAJAAMCWGCWCGSAGG+EIBDQQFFH1PCGVUU1NMIEDLBMVY +YXRLZCBDZXJ0AWZPY2F0ZTADBGNVHQ4EFGQU+TUGFTYN+CXE1WXUQEA7X+YS3BGW +HWYDVR0JBBGWFOAUHMWQKBBRGP87HXFVWGPNLGGVR64WDQYJKOZIHVCNAQEFBQAD +GGEBAIEEMQQHEZEXZ4CKHE5UM9VCKZKJ5IV9TFS/A9CCQUEPZPLT7YVMEVBFNOC0 
++1ZYR4TXGI4+5MHGZHYCIVVHO4HKQYM+J+O5MWQINF1QOAHUO7CLD3WNA1SKCVUV +VEPIXC/1AHZRG+DPEEHT0MDFFOW13YDUC2FH6AQEDCEL4AV5PXQ2EYR8HR4ZKBC1 +FBTUQUSVA8NWSIYZQ16FYGVE+ANF6VXVUIZYVWDRPRV/KFVLNA3ZPNLMMXU98MVH +PXY3PKB8++6U4Y3VDK2NI2WYYLILS8YQBM4327IKMKDC2TIMS8U60CT47MKU7ADY +CBTV5RDKRLAYWM5YQLTIGLVCV7O= +-----END CERTIFICATE----- diff --git a/kustomize/grafana/base/example-key.pem b/kustomize/grafana/base/example-key.pem new file mode 100644 index 00000000..18e01dee --- /dev/null +++ b/kustomize/grafana/base/example-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEOWIBAAKCAQEAVPNAPKLIKDVX98KW68LZ8PGARRCYERSNGQPJPIFMVJJE8LUC +OXGPU0HEPNNTUJPSHBNYNKCVRTWHN+HAKBSP+QWXSXITRW99HBFAL1MDQYWCUKOE +B9CW6INCTVUN4IRVKN9T8E6Q174RBCNWA/7YTC7P1NCVW+6B/AAN9L1G2PQXGRDY +C/+G6O1IZEHTWHQZE97NY5QKNUUVD0V09DC5CDYBAKJQETWWV6DFK/GRDOSED/6B +W+20Z0QSHPA3YNW6QSP+X5PYYMDRZRIR03OS6DAUZKCHSRYC/WHVURX6O85D6QPZ +YWO8XWNALZHXTQPGCIA5SU9ZIYTV9LH2E+LSWWIDAQABAOIBAFML8CD9A5PMQLW3 +F9BTTQZ1SRL4FVP7CMHSXHVJSJEHWHHCKEE0OBKWTRSGKTSM1XLU5W8IITNHN0+1 +INR+78EB+RRGNGDAXH8DIODKEY+8/CEE8TFI3JYUTKDRLXMBWIKSOUVVIUMOQ3FX +OGQYWQ0Z2L/PVCWY/Y82FFQ3YSC5GAJSBBYSCRG14BQO44ULRELE4SDWS5HCJKYB +EI2B8COMUCQZSOTXG9NILN/JE2BO/I2HGSAWIBGCODBMS8K6TVSSRZMR3KJ5O6J+ +77LGWKH37BRVGBVYVBQ6NWPL0XLG7DUV+7LWEO5QQAPY6AXB/ZBCKQLQU6/EJOVE +YDG5JQECGYEA9KKFTZD/WEVAREA0DZFEJRU8VLNWOAGL7CJAODXQXOS4MCR5MPDT +KBWGFKLFFH/AYUNPBLK6BCJP1XK67B13ETUA3I9Q5T1WUZEOBIKKBLFM9DDQJT43 +UKZWJXBKFGSVFRYPTGZST719MZVCPCT2CZPJEGN3HLPT6FYW3EORNOECGYEAXIOU +JWXCOMUGAB7+OW2TR0PGEZBVVLEGDKAJ6TC/HOKM1A8R2U4HLTEJJCRLLTFW++4I +DDHE2DLER4Q7O58SFLPHWGPMLDEZN7WRLGR7VYFUV7VMAHJGUC3GV9AGNHWDLA2Q +GBG9/R9OVFL0DC7CGJGLEUTITCYC31BGT3YHV0MCGYEA4K3DG4L+RN4PXDPHVK9I +PA1JXAJHEIFEHNAW1D3VWKBSKVJMGVF+9U5VEV+OWRHN1QZPZV4SURI6M/8LK8RA +GR4UNM4AQK4K/QKY4G05LKRIK9EV2CGQSLQDRA7CJQ+JN3NB50QG6HFNFPAFN+J7 +7JUWLN08WFYV4ATPDD+9XQECGYBXIZKZFL+9IQKFOCONVWAZGO+DQ1N0L3J4ITIK +W56CKWXYJ88D4QB4EUU3YJ4UB4S9MIAW/ELEWKZIBWPUPFAN0DB7I6H3ZMP5ZL8Q +QS3NQCB9DULMU2/TU641ERUKAMIOKA1G9SNDKAZUWO+O6FDKIB1RGOBK9XNN8R4R +PSV+AQKBGB+CICEXR30VYCV5BNZN9EFLIXNKAEMJURYCXCRQNVRNUIUBVAO8+JAE +CDLYGS5RTGOLZIB0IVERQWSP3EI1ACGULTS0VQ9GFLQGAN1SAMS40C9KVNS1MLDU +LHIHYPJ8USCVT5SNWO2N+M+6ANH5TPWDQNEK6ZILH4TRBUZAIHGB +-----END RSA PRIVATE KEY----- diff --git a/kustomize/grafana/base/grafana-database.yaml b/kustomize/grafana/base/grafana-database.yaml new file mode 100644 index 00000000..f8e57070 --- /dev/null +++ b/kustomize/grafana/base/grafana-database.yaml @@ -0,0 +1,59 @@ +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Database +metadata: + name: grafana + namespace: openstack + annotations: + helm.sh/resource-policy: keep + labels: + app.kubernetes.io/managed-by: Helm +spec: + # If you want the database to be created with a different name than the resource name + # name: data-custom + mariaDbRef: + name: mariadb-galera + characterSet: utf8 + collate: utf8_general_ci + requeueInterval: 30s + retryInterval: 5s +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: User +metadata: + name: grafana + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + # If you want the user to be created with a different name than the resource name + # name: user-custom + mariaDbRef: + name: mariadb-galera + passwordSecretKeyRef: + name: grafana-db + key: password + # This field is immutable and defaults to 10 + maxUserConnections: 20 + host: "%" + requeueInterval: 30s + retryInterval: 5s +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Grant +metadata: + name: grant + namespace: openstack + annotations: + helm.sh/resource-policy: keep 
+spec: + mariaDbRef: + name: mariadb-galera + privileges: + - "ALL" + database: "grafana" + table: "*" + username: grafana + grantOption: true + host: "%" + requeueInterval: 30s + retryInterval: 5s diff --git a/kustomize/grafana/base/grafana-values.yaml b/kustomize/grafana/base/grafana-values.yaml new file mode 100644 index 00000000..13b92f7a --- /dev/null +++ b/kustomize/grafana/base/grafana-values.yaml @@ -0,0 +1,58 @@ +#### EDIT THESE TWO VARIABLES WITH YOUR VALUES +custom_host: grafana.example.com # TODO: update this value to the FQDN of your grafana site +tenant_id: 122333 # TODO: update this value to use your Azure Tenant ID +#### + +ingress: + enabled: true + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + path: / + pathType: ImplementationSpecific + + hosts: + - "{{ .Values.custom_host }}" # Ref: custom_host variable above + tls: + - hosts: + - "{{ .Values.custom_host }}" # Ref: custom_host variable above + secretName: grafana-tls-public + + +extraSecretMounts: + - name: azure-client-secret-mount + secretName: azure-client + defaultMode: 0440 + mountPath: /etc/secrets/azure-client + readOnly: true +nodeSelector: + openstack-control-plane: enabled +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ .Values.custom_host }}" # Ref: custom_host variable above + root_url: "https://{{ .Values.custom_host }}" # Ref: custom_host variable above + auth.azuread: + name: Azure AD + enabled: true + allow_sign_up: true + auto_login: false + client_id: $__file{/etc/secrets/azure-client/client_id} + client_secret: $__file{/etc/secrets/azure-client/client_secret} + scopes: openid email profile + auth_url: "https://login.microsoftonline.com/{{ .Values.tenant_id }}/oauth2/v2.0/authorize" + token_url: "https://login.microsoftonline.com/{{ .Values.tenant_id }}/oauth2/v2.0/token" + allowed_organizations: "{{ .Values.tenant_id }}" + role_attribute_strict: false + allow_assign_grafana_admin: false + skip_org_role_sync: false + use_pkce: true diff --git a/kustomize/grafana/base/kustomization.yaml b/kustomize/grafana/base/kustomization.yaml new file mode 100644 index 00000000..f50c4088 --- /dev/null +++ b/kustomize/grafana/base/kustomization.yaml @@ -0,0 +1,13 @@ +resources: + - ns-grafana.yaml + - azure-client-secret.yaml + - grafana-database.yaml + +helmCharts: + - name: grafana + repo: https://grafana.github.io/helm-charts + releaseName: grafana + namespace: grafana + valuesFile: grafana-values.yaml + additionalValuesFiles: + - datasources.yaml diff --git a/kustomize/grafana/base/ns-grafana.yaml b/kustomize/grafana/base/ns-grafana.yaml new file mode 100644 index 00000000..d780fe65 --- /dev/null +++ b/kustomize/grafana/base/ns-grafana.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + kubernetes.io/metadata.name: grafana + name: grafana + name: grafana diff --git a/kustomize/heat/base/heat-mariadb-database.yaml b/kustomize/heat/base/heat-mariadb-database.yaml index dbced123..76bd3eac 100644 --- a/kustomize/heat/base/heat-mariadb-database.yaml +++ b/kustomize/heat/base/heat-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: heat namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ 
kind: User metadata: name: heat namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: heat-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/horizon/base/horizon-mariadb-database.yaml b/kustomize/horizon/base/horizon-mariadb-database.yaml index 7bc91e33..2daf6706 100644 --- a/kustomize/horizon/base/horizon-mariadb-database.yaml +++ b/kustomize/horizon/base/horizon-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: horizon namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: horizon namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: horizon-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/keystone/base/keystone-mariadb-database.yaml b/kustomize/keystone/base/keystone-mariadb-database.yaml index c9fcd34f..94865021 100644 --- a/kustomize/keystone/base/keystone-mariadb-database.yaml +++ b/kustomize/keystone/base/keystone-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: keystone namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: keystone namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: keystone-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/keystone/base/keystone-rabbitmq-queue.yaml b/kustomize/keystone/base/keystone-rabbitmq-queue.yaml index e55e4192..7972f194 100644 --- a/kustomize/keystone/base/keystone-rabbitmq-queue.yaml +++ b/kustomize/keystone/base/keystone-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: keystone namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: keystone-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "keystone" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: keystone-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: keystone-qq # name of the queue vhost: "keystone" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: keystone-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "keystone" # name of a vhost userReference: diff --git a/kustomize/mariadb-cluster/aio/kustomization.yaml b/kustomize/mariadb-cluster/aio/kustomization.yaml index 5b0a66d9..7e5e7e83 100644 
--- a/kustomize/mariadb-cluster/aio/kustomization.yaml +++ b/kustomize/mariadb-cluster/aio/kustomization.yaml @@ -12,3 +12,10 @@ patches: - op: replace path: /spec/galera/enabled value: false + - target: + kind: MaxScale + name: maxscale-galera + patch: |- + - op: replace + path: /spec/replicas + value: 1 diff --git a/kustomize/mariadb-cluster/base/kustomization.yaml b/kustomize/mariadb-cluster/base/kustomization.yaml index f297b151..c074ce60 100644 --- a/kustomize/mariadb-cluster/base/kustomization.yaml +++ b/kustomize/mariadb-cluster/base/kustomization.yaml @@ -1,4 +1,5 @@ resources: - mariadb-configmap.yaml + - mariadb-maxscale.yaml - mariadb-galera.yaml - mariadb-backup.yaml diff --git a/kustomize/mariadb-cluster/base/mariadb-galera.yaml b/kustomize/mariadb-cluster/base/mariadb-galera.yaml index 89f1d6ea..f0bff164 100644 --- a/kustomize/mariadb-cluster/base/mariadb-galera.yaml +++ b/kustomize/mariadb-cluster/base/mariadb-galera.yaml @@ -28,21 +28,8 @@ spec: runAsUser: 0 # point to an existing MaxScale instance. Doing this will delegate tasks such as primary failover to MaxScale. - # maxScaleRef: - # name: maxscale - - # provision a MaxScale instance and set 'spec.maxScaleRef' automatically. - maxScale: - enabled: false - - kubernetesService: - type: LoadBalancer - annotations: - metallb.universe.tf/address-pool: primary - - connection: - secretName: mxs-galera-conn - port: 3306 + maxScaleRef: + name: maxscale-galera galera: enabled: true @@ -54,7 +41,7 @@ spec: galeraLibPath: /usr/lib/galera/libgalera_smm.so replicaThreads: 1 agent: - image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.26 + image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27 port: 5555 kubernetesAuth: enabled: true @@ -67,7 +54,7 @@ spec: podRecoveryTimeout: 3m podSyncTimeout: 3m initContainer: - image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.26 + image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27 config: reuseStorageVolume: false volumeClaimTemplate: @@ -132,8 +119,6 @@ spec: resources: requests: memory: 256Mi - limits: - memory: 16Gi metrics: enabled: true diff --git a/kustomize/mariadb-cluster/base/mariadb-maxscale.yaml b/kustomize/mariadb-cluster/base/mariadb-maxscale.yaml new file mode 100644 index 00000000..350d8ca0 --- /dev/null +++ b/kustomize/mariadb-cluster/base/mariadb-maxscale.yaml @@ -0,0 +1,132 @@ +apiVersion: k8s.mariadb.com/v1alpha1 +kind: MaxScale +metadata: + name: maxscale-galera +spec: + replicas: 3 + + mariaDbRef: + name: mariadb-galera + namespace: openstack + + services: + - name: rw-router + router: readwritesplit + params: + transaction_replay: "true" + transaction_replay_attempts: "10" + transaction_replay_timeout: "5s" + max_slave_connections: "255" + max_replication_lag: "3s" + master_accept_reads: "true" + listener: + name: rw-listener + port: 3306 + protocol: MariaDBProtocol + params: + connection_metadata: "tx_isolation=auto" + suspend: false + suspend: false + - name: rconn-master-router + router: readconnroute + params: + router_options: "master" + max_replication_lag: "3s" + master_accept_reads: "true" + listener: + port: 3307 + - name: rconn-slave-router + router: readconnroute + params: + router_options: "slave" + max_replication_lag: "3s" + listener: + port: 3308 + + monitor: + name: mariadb-monitor + module: galeramon + interval: 2s + cooperativeMonitoring: majority_of_all + params: + disable_master_failback: "false" + available_when_donor: "false" + disable_master_role_setting: "false" + suspend: false + + admin: + port: 8989 + guiEnabled: true + + config: + 
params: + log_info: "true" + volumeClaimTemplate: + resources: + requests: + storage: 100Mi + accessModes: + - ReadWriteOnce + storageClassName: general + sync: + database: mysql + interval: 5s + timeout: 10s + + auth: + generate: true + adminUsername: mariadb-operator + adminPasswordSecretKeyRef: + name: maxscale + key: password + deleteDefaultAdmin: true + clientUsername: maxscale-galera-client + clientPasswordSecretKeyRef: + name: maxscale + key: password + clientMaxConnections: 1024 + serverUsername: maxscale-galera-server + serverPasswordSecretKeyRef: + name: maxscale + key: password + serverMaxConnections: 1024 + monitorUsername: maxscale-galera-monitor + monitorPasswordSecretKeyRef: + name: maxscale + key: password + monitorMaxConnections: 128 + syncUsername: maxscale-galera-sync + syncPasswordSecretKeyRef: + name: maxscale + key: password + syncMaxConnections: 128 + + securityContext: + allowPrivilegeEscalation: false + + updateStrategy: + type: RollingUpdate + + kubernetesService: + type: LoadBalancer + annotations: + metallb.universe.tf/address-pool: primary + + connection: + secretName: mxs-galera-conn + port: 3306 + + resources: + requests: + memory: 128Mi + + affinity: + enableAntiAffinity: true + + tolerations: + - key: "k8s.mariadb.com/ha" + operator: "Exists" + effect: "NoSchedule" + + podDisruptionBudget: + maxUnavailable: 33% diff --git a/kustomize/mariadb-operator/kustomization.yaml b/kustomize/mariadb-operator/kustomization.yaml index 737d7986..4f78600f 100644 --- a/kustomize/mariadb-operator/kustomization.yaml +++ b/kustomize/mariadb-operator/kustomization.yaml @@ -14,5 +14,5 @@ helmCharts: metrics: enabled: true includeCRDs: true - version: 0.26.0 + version: 0.27.0 namespace: mariadb-system diff --git a/kustomize/neutron/base/neutron-mariadb-database.yaml b/kustomize/neutron/base/neutron-mariadb-database.yaml index 7ae9d6d2..36563b22 100644 --- a/kustomize/neutron/base/neutron-mariadb-database.yaml +++ b/kustomize/neutron/base/neutron-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: neutron namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: neutron namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: neutron-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/neutron/base/neutron-rabbitmq-queue.yaml b/kustomize/neutron/base/neutron-rabbitmq-queue.yaml index 15cb236d..b9617413 100644 --- a/kustomize/neutron/base/neutron-rabbitmq-queue.yaml +++ b/kustomize/neutron/base/neutron-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: neutron namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: neutron-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "neutron" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: neutron-queue namespace: openstack + annotations: + 
helm.sh/resource-policy: keep spec: name: neutron-qq # name of the queue vhost: "neutron" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: neutron-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "neutron" # name of a vhost userReference: diff --git a/kustomize/nova/base/nova-mariadb-database.yaml b/kustomize/nova/base/nova-mariadb-database.yaml index 8ee8c90b..150af95d 100644 --- a/kustomize/nova/base/nova-mariadb-database.yaml +++ b/kustomize/nova/base/nova-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: nova namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: Database metadata: name: nova-api namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -33,6 +37,8 @@ kind: Database metadata: name: nova-cell0 namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -48,6 +54,8 @@ kind: User metadata: name: nova namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -66,6 +74,8 @@ kind: Grant metadata: name: nova-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera @@ -83,6 +93,8 @@ kind: Grant metadata: name: nova-api-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera @@ -100,6 +112,8 @@ kind: Grant metadata: name: nova-cell0-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/nova/base/nova-rabbitmq-queue.yaml b/kustomize/nova/base/nova-rabbitmq-queue.yaml index 61077e77..7010af5d 100644 --- a/kustomize/nova/base/nova-rabbitmq-queue.yaml +++ b/kustomize/nova/base/nova-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: nova namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: nova-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "nova" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: nova-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: nova-qq # name of the queue vhost: "nova" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: nova-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "nova" # name of a vhost userReference: diff --git a/kustomize/octavia/base/octavia-agent.yaml b/kustomize/octavia/base/octavia-agent.yaml index 60fc81d8..58fb12ad 100644 --- a/kustomize/octavia/base/octavia-agent.yaml +++ b/kustomize/octavia/base/octavia-agent.yaml @@ -81,7 +81,7 @@ spec: - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/ - name: DEPENDENCY_SERVICE - value: 
"openstack:mariadb-galera-primary,openstack:keystone-api,openstack:rabbitmq-nodes,openstack:memcached,openstack:neutron-server" + value: "openstack:maxscale-galera,openstack:keystone-api,openstack:rabbitmq-nodes,openstack:memcached,openstack:neutron-server" - name: DEPENDENCY_JOBS value: "octavia-db-sync,octavia-ks-user,octavia-ks-endpoints" - name: DEPENDENCY_DAEMONSET @@ -187,4 +187,4 @@ spec: secretName: octavia-etc defaultMode: 0444 - emptyDir: {} - name: pod-run-octavia \ No newline at end of file + name: pod-run-octavia diff --git a/kustomize/octavia/base/octavia-mariadb-database.yaml b/kustomize/octavia/base/octavia-mariadb-database.yaml index d253d793..66deeea2 100644 --- a/kustomize/octavia/base/octavia-mariadb-database.yaml +++ b/kustomize/octavia/base/octavia-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: octavia namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: octavia namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: octavia-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: mariadb-galera diff --git a/kustomize/octavia/base/octavia-rabbitmq-queue.yaml b/kustomize/octavia/base/octavia-rabbitmq-queue.yaml index a5af8b5b..783061f3 100644 --- a/kustomize/octavia/base/octavia-rabbitmq-queue.yaml +++ b/kustomize/octavia/base/octavia-rabbitmq-queue.yaml @@ -4,6 +4,8 @@ kind: User metadata: name: octavia namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: tags: - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' @@ -19,6 +21,8 @@ kind: Vhost metadata: name: octavia-vhost namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: "octavia" # vhost name; required and cannot be updated defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above @@ -38,6 +42,8 @@ kind: Queue metadata: name: octavia-queue namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: name: octavia-qq # name of the queue vhost: "octavia" # default to '/' if not provided @@ -53,6 +59,8 @@ kind: Permission metadata: name: octavia-permission namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: vhost: "octavia" # name of a vhost userReference: diff --git a/kustomize/placement/base/placement-mariadb-database.yaml b/kustomize/placement/base/placement-mariadb-database.yaml index a89862ad..ff6e1c31 100644 --- a/kustomize/placement/base/placement-mariadb-database.yaml +++ b/kustomize/placement/base/placement-mariadb-database.yaml @@ -4,6 +4,8 @@ kind: Database metadata: name: placement namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the database to be created with a different name than the resource name # name: data-custom @@ -18,6 +20,8 @@ kind: User metadata: name: placement namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: # If you want the user to be created with a different name than the resource name # name: user-custom @@ -36,6 +40,8 @@ kind: Grant metadata: name: placement-grant namespace: openstack + annotations: + helm.sh/resource-policy: keep spec: mariaDbRef: name: 
mariadb-galera diff --git a/kustomize/prometheus-postgres-exporter/kustomization.yaml b/kustomize/prometheus-postgres-exporter/kustomization.yaml new file mode 100644 index 00000000..4461e2c4 --- /dev/null +++ b/kustomize/prometheus-postgres-exporter/kustomization.yaml @@ -0,0 +1,8 @@ +helmCharts: + - name: prometheus-postgres-exporter + repo: https://prometheus-community.github.io/helm-charts + releaseName: prometheus-postgres-exporter + namespace: openstack + version: 6.0.0 + includeCRDs: true + valuesFile: values.yaml diff --git a/kustomize/prometheus-postgres-exporter/values.yaml b/kustomize/prometheus-postgres-exporter/values.yaml new file mode 100644 index 00000000..01ebe1b9 --- /dev/null +++ b/kustomize/prometheus-postgres-exporter/values.yaml @@ -0,0 +1,259 @@ +replicaCount: 1 + +image: + registry: quay.io + repository: prometheuscommunity/postgres-exporter + # if not set appVersion field from Chart.yaml is used + tag: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +command: [] + +service: + type: ClusterIP + port: 9187 + targetPort: 9187 + name: http + labels: {} + annotations: {} + +automountServiceAccountToken: false + +serviceMonitor: + # When set true then use a ServiceMonitor to configure scraping + enabled: true + # Set the namespace the ServiceMonitor should be deployed + namespace: openstack + # Set how frequently Prometheus should scrape + # interval: 30s + # Set path to cloudwatch-exporter telemtery-path + # telemetryPath: /metrics + # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator + # labels: + # Set timeout for scrape + # timeout: 10s + # Set of labels to transfer from the Kubernetes Service onto the target + # targetLabels: [] + # MetricRelabelConfigs to apply to samples before ingestion + # metricRelabelings: [] + # Set relabel_configs as per https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + # relabelings: [] + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current prometheus-postgres-exporter service. + # - alert: HugeReplicationLag + # expr: pg_replication_lag{service="{{ template "prometheus-postgres-exporter.fullname" . }}"} / 3600 > 1 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: replication for {{ template "prometheus-postgres-exporter.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + +priorityClassName: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m +# memory: 128Mi + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Add annotations to the ServiceAccount, useful for EKS IAM Roles for Service Accounts or Google Workload Identity. + annotations: {} + +# Add a default ingress to allow namespace access to service.targetPort +# Helpful if other NetworkPolicies are configured in the namespace +networkPolicy: + # Specifies whether a NetworkPolicy should be created + enabled: false + # Set labels for the NetworkPolicy + labels: {} + +# The securityContext of the pod. +# See https://kubernetes.io/docs/concepts/policy/security-context/ for more. +podSecurityContext: + runAsGroup: 1001 + runAsUser: 1001 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + +# The securityContext of the container. +# See https://kubernetes.io/docs/concepts/policy/security-context/ for more. +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + +hostAliases: [] + # Set Host Aliases as per https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/ + # - ip: "127.0.0.1" + # hostnames: + # - "foo.local" +# - "bar.local" + +config: + ## The datasource properties on config are passed through helm tpl function. + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + datasource: + # Specify one of both datasource or datasourceSecret + host: postgresql + user: postgres + userSecret: {} + # Secret name + # name: + # User key inside secret + # key: + # Only one of password, passwordFile, passwordSecret and pgpassfile can be specified + password: + # Specify passwordFile if DB password is stored in a file. + # For example, to use with vault-injector from Hashicorp + passwordFile: '' + # Specify passwordSecret if DB password is stored in secret. + passwordSecret: + name: postgresql-db-admin + key: password + # Secret name + # name: + # Password key inside secret + # key: + pgpassfile: '' + # If pgpassfile is set, it is used to initialize the PGPASSFILE environment variable. + # See https://www.postgresql.org/docs/14/libpq-pgpass.html for more info. 
+ port: "5432" + database: '' + sslmode: disable + extraParams: '' + datasourceSecret: {} + # Specifies if datasource should be sourced from secret value in format: postgresql://login:password@hostname:port/dbname?sslmode=disable + # Multiple Postgres databases can be configured by comma separated postgres connection strings + # Secret name + # name: + # Connection string key inside secret + # key: + disableCollectorDatabase: false + disableCollectorBgwriter: false + disableDefaultMetrics: false + disableSettingsMetrics: false + + # possible values debug, info, warn, error, fatal + logLevel: "" + # possible values logfmt, json + logFormat: "" + extraArgs: [] + + # postgres_exporter.yml + postgresExporter: "" + # auth_modules: + # first: + # type: userpass + # userpass: + # username: first + # password: firstpass + # options: + # sslmode: disable + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +annotations: { + prometheus.io/scrape: "true", + prometheus.io/path: "/metrics", + prometheus.io/port: "9187", +} + +podLabels: {} + +# Configurable health checks +livenessProbe: + initialDelaySeconds: 0 + timeoutSeconds: 3 + +readinessProbe: + initialDelaySeconds: 0 + timeoutSeconds: 1 + +# Labels and annotations to attach to the deployment resource +deployment: + labels: {} + annotations: { + prometheus.io/scrape: "true", + prometheus.io/path: "/metrics", + prometheus.io/port: "9187", + } + +# ExtraEnvs +extraEnvs: [] + # - name: EXTRA_ENV + # value: value + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: +# fieldPath: metadata.namespace + +# Init containers, e. g. for secrets creation before the exporter +initContainers: [] + # - name: + # image: + # volumeMounts: + # - name: creds +# mountPath: /creds + +# Additional sidecar containers, e. g. for a database proxy, such as Google's cloudsql-proxy +extraContainers: [] + +# Additional volumes, e. g. for secrets used in an extraContainer +extraVolumes: [] +# Uncomment for mounting custom ca-certificates +# - name: ssl-certs +# secret: +# defaultMode: 420 +# items: +# - key: ca-certificates.crt +# path: ca-certificates.crt +# secretName: ssl-certs + +# Additional volume mounts +extraVolumeMounts: [] +# Uncomment for mounting custom ca-certificates file into container +# - name: ssl-certs +# mountPath: /etc/ssl/certs/ca-certificates.crt +# subPath: ca-certificates.crt + +podDisruptionBudget: + enabled: false + maxUnavailable: 1 diff --git a/kustomize/prometheus/values.yaml b/kustomize/prometheus/values.yaml index b4ae552f..8f579718 100644 --- a/kustomize/prometheus/values.yaml +++ b/kustomize/prometheus/values.yaml @@ -742,16 +742,14 @@ alertmanager: ## Storage is the definition of how storage will be used by the Alertmanager instances. ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md ## - storage: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - + storage: + volumeClaimTemplate: + spec: + storageClassName: general + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 15Gi ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. 
string false ## @@ -3572,17 +3570,14 @@ prometheus: ## Prometheus StorageSpec for persistent data ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md ## - storageSpec: {} - ## Using PersistentVolumeClaim - ## - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: general + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 15Gi ## Using tmpfs volume ## diff --git a/kustomize/rook-cluster/rook-cluster.yaml b/kustomize/rook-cluster/rook-cluster.yaml index 632d052a..c95de39f 100644 --- a/kustomize/rook-cluster/rook-cluster.yaml +++ b/kustomize/rook-cluster/rook-cluster.yaml @@ -21,7 +21,7 @@ spec: # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v17.2.6-20231027 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v18.2.1 + image: quay.io/ceph/ceph:v18.2.2 # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported. # Future versions such as `squid` (v19) would require this to be set to `true`. # Do not set to true in production. diff --git a/kustomize/sealed-secrets/base/kustomization.yaml b/kustomize/sealed-secrets/base/kustomization.yaml new file mode 100644 index 00000000..d8033add --- /dev/null +++ b/kustomize/sealed-secrets/base/kustomization.yaml @@ -0,0 +1,12 @@ +resources: + - './namespace.yaml' +namespace: sealed-secrets +helmGlobals: + chartHome: ../charts/ +helmCharts: +- name: sealed-secrets + includeCRDs: true + releaseName: sealed-secrets + valuesFile: values.yaml + version: 2.14.2 + repo: https://bitnami-labs.github.io/sealed-secrets diff --git a/kustomize/sealed-secrets/base/namespace.yaml b/kustomize/sealed-secrets/base/namespace.yaml new file mode 100644 index 00000000..100ff1eb --- /dev/null +++ b/kustomize/sealed-secrets/base/namespace.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + kubernetes.io/metadata.name: sealed-secrets + name: sealed-secrets + name: sealed-secrets diff --git a/kustomize/sealed-secrets/base/values.yaml b/kustomize/sealed-secrets/base/values.yaml new file mode 100644 index 00000000..a4172609 --- /dev/null +++ b/kustomize/sealed-secrets/base/values.yaml @@ -0,0 +1,486 @@ +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override sealed-secrets.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override sealed-secrets.fullname +## +fullnameOverride: "sealed-secrets-controller" +## @param namespace Namespace where to deploy the Sealed Secrets controller +## +namespace: "" + +## @param extraDeploy [array] Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param commonAnnotations [object] Annotations to add to all deployed resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +commonAnnotations: {} + +## @param commonLabels [object] Labels to add to all deployed resources +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +commonLabels: {} + +## @section 
Sealed Secrets Parameters + +## Sealed Secrets image +## ref: https://hub.docker.com/r/bitnami/sealed-secrets-controller/tags +## @param image.registry Sealed Secrets image registry +## @param image.repository Sealed Secrets image repository +## @param image.tag Sealed Secrets image tag (immutable tags are recommended) +## @param image.pullPolicy Sealed Secrets image pull policy +## @param image.pullSecrets [array] Sealed Secrets image pull secrets +## +image: + registry: docker.io + repository: bitnami/sealed-secrets-controller + tag: 0.25.0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] +## @param revisionHistoryLimit Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) +## e.g: +revisionHistoryLimit: "" +## @param createController Specifies whether the Sealed Secrets controller should be created +## +createController: true +## @param secretName The name of an existing TLS secret containing the key used to encrypt secrets +## +secretName: "sealed-secrets-key" +## @param updateStatus Specifies whether the Sealed Secrets controller should update the status subresource +## +updateStatus: true +## @param skipRecreate Specifies whether the Sealed Secrets controller should skip recreating removed secrets +## Setting it to true allows to optionally restore backward compatibility in low priviledge +## environments when old versions of the controller did not require watch permissions on secrets +## for secret re-creation. +## +skipRecreate: false +## @param keyrenewperiod Specifies key renewal period. 
Default 30 days +## e.g +## keyrenewperiod: "720h30m" +## +keyrenewperiod: "0" +## @param rateLimit Number of allowed sustained request per second for verify endpoint +## +rateLimit: "" +## @param rateLimitBurst Number of requests allowed to exceed the rate limit per second for verify endpoint +## +rateLimitBurst: "" +## @param additionalNamespaces List of namespaces used to manage the Sealed Secrets +## +additionalNamespaces: [] +## @param privateKeyAnnotations Map of annotations to be set on the sealing keypairs +## +privateKeyAnnotations: {} +## @param privateKeyLabels Map of labels to be set on the sealing keypairs +## +privateKeyLabels: {} +## @param logInfoStdout Specifies whether the Sealed Secrets controller will log info to stdout +## +logInfoStdout: false +## @param command Override default container command +## +command: [] +## @param args Override default container args +## +args: [] +## Configure extra options for Sealed Secret containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Sealed Secret containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Sealed Secret containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## @param startupProbe.enabled Enable startupProbe on Sealed Secret containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## Sealed Secret resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits [object] The resources limits for the Sealed Secret containers +## @param resources.requests [object] 
The requested resources for the Sealed Secret containers +## +resources: + limits: {} + requests: {} +## Configure Pods Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled Sealed Secret pods' Security Context +## @param podSecurityContext.fsGroup Set Sealed Secret pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 65534 +## Configure Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param containerSecurityContext.enabled Enabled Sealed Secret containers' Security Context +## @param containerSecurityContext.readOnlyRootFilesystem Whether the Sealed Secret container has a read-only root filesystem +## @param containerSecurityContext.runAsNonRoot Indicates that the Sealed Secret container must run as a non-root user +## @param containerSecurityContext.runAsUser Set Sealed Secret containers' Security Context runAsUser +## @extra containerSecurityContext.capabilities Adds and removes POSIX capabilities from running containers (see `values.yaml`) +## @skip containerSecurityContext.capabilities.drop +## +containerSecurityContext: + enabled: true + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1001 + capabilities: + drop: + - ALL + +## @param podLabels [object] Extra labels for Sealed Secret pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations [object] Annotations for Sealed Secret pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param priorityClassName Sealed Secret pods' priorityClassName +## +priorityClassName: "" +## @param runtimeClassName Sealed Secret pods' runtimeClassName +## +runtimeClassName: "" +## @param affinity [object] Affinity for Sealed Secret pods assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} +## @param nodeSelector [object] Node labels for Sealed Secret pods assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations [array] Tolerations for Sealed Secret pods assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param additionalVolumes [object] Extra Volumes for the Sealed Secrets Controller Deployment +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +additionalVolumes: [] +## @param additionalVolumeMounts [object] Extra volumeMounts for the Sealed Secrets Controller container +## ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +additionalVolumeMounts: [] +## @param hostNetwork Sealed Secrets pods' hostNetwork +hostNetwork: false +## @param dnsPolicy Sealed Secrets pods' dnsPolicy +dnsPolicy: "" + +## @section Traffic Exposure Parameters + +## Sealed Secret service parameters +## +service: + ## @param service.type Sealed Secret service type + ## + type: ClusterIP + ## @param service.port Sealed Secret service HTTP port + ## + port: 8080 + ## @param service.nodePort Node port for HTTP + ## Specify the nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePort: "" + ## 
@param service.annotations [object] Additional custom annotations for Sealed Secret service
+  ##
+  annotations: {}
+## Sealed Secret ingress parameters
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## @param ingress.enabled Enable ingress record generation for Sealed Secret
+  ##
+  enabled: false
+  ## @param ingress.pathType Ingress path type
+  ##
+  pathType: ImplementationSpecific
+  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
+  ##
+  apiVersion: ""
+  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress
+  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
+  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+  ##
+  ingressClassName: ""
+  ## @param ingress.hostname Default host for the ingress record
+  ##
+  hostname: sealed-secrets.local
+  ## @param ingress.path Default path for the ingress record
+  ##
+  path: /v1/cert.pem
+  ## @param ingress.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+  ## Use this parameter to set the required annotations for cert-manager, see
+  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+  ## e.g:
+  ## annotations:
+  ##   kubernetes.io/ingress.class: nginx
+  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
+  ##
+  annotations: {}
+  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
+  ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
+  ## You can:
+  ##   - Use the `ingress.secrets` parameter to create this TLS secret
+  ##   - Rely on cert-manager to create it by setting the corresponding annotations
+  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+  ##
+  tls: false
+  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+  ##
+  selfSigned: false
+  ## @param ingress.extraHosts [array] An array with additional hostname(s) to be covered with the ingress record
+  ## e.g:
+  ## extraHosts:
+  ##   - name: sealed-secrets.local
+  ##     path: /
+  ##
+  extraHosts: []
+  ## @param ingress.extraPaths [array] An array with additional arbitrary paths that may need to be added to the ingress under the main host
+  ## e.g:
+  ## extraPaths:
+  ## - path: /*
+  ##   backend:
+  ##     serviceName: ssl-redirect
+  ##     servicePort: use-annotation
+  ##
+  extraPaths: []
+  ## @param ingress.extraTls [array] TLS configuration for additional hostname(s) to be covered with this ingress record
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+  ## e.g:
+  ## extraTls:
+  ## - hosts:
+  ##     - sealed-secrets.local
+  ##   secretName: sealed-secrets.local-tls
+  ##
+  extraTls: []
+  ## @param ingress.secrets [array] Custom TLS certificates as secrets
+  ## NOTE: 'key' and 'certificate' are expected in PEM format
+  ## NOTE: 'name' should line up with a 'secretName' set further up
+  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+  ## It is also possible to create and manage the certificates outside of this helm
chart
+  ## Please see README.md for more information
+  ## e.g:
+  ## secrets:
+  ##   - name: sealed-secrets.local-tls
+  ##     key: |-
+  ##       -----BEGIN RSA PRIVATE KEY-----
+  ##       ...
+  ##       -----END RSA PRIVATE KEY-----
+  ##     certificate: |-
+  ##       -----BEGIN CERTIFICATE-----
+  ##       ...
+  ##       -----END CERTIFICATE-----
+  ##
+  secrets: []
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## NetworkPolicy Egress configuration
+  ##
+  egress:
+    ## @param networkPolicy.egress.enabled Specifies whether an egress is set in the NetworkPolicy
+    ##
+    enabled: false
+    ## @param networkPolicy.egress.kubeapiCidr Specifies the kubeapiCidr, which is the only egress allowed. If not set, kubeapiCidr will be found using Helm lookup
+    ##
+    kubeapiCidr: ""
+    ## @param networkPolicy.egress.kubeapiPort Specifies the kubeapiPort, which is the only egress allowed. If not set, kubeapiPort will be found using Helm lookup
+    ##
+    kubeapiPort: ""
+
+## @section Other Parameters
+
+## ServiceAccount configuration
+##
+serviceAccount:
+  ## @param serviceAccount.annotations [object] Annotations for Sealed Secret service account
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  annotations: {}
+  ## @param serviceAccount.create Specifies whether a ServiceAccount should be created
+  ##
+  create: true
+  ## @param serviceAccount.labels Extra labels to be added to the ServiceAccount
+  ##
+  labels: {}
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the sealed-secrets.fullname template
+  ##
+  name: ""
+## RBAC configuration
+##
+rbac:
+  ## @param rbac.create Specifies whether RBAC resources should be created
+  ##
+  create: true
+  ## @param rbac.clusterRole Specifies whether the Cluster Role resource should be created
+  ##
+  clusterRole: true
+  ## @param rbac.clusterRoleName Specifies the name for the Cluster Role resource
+  ##
+  clusterRoleName: "secrets-unsealer"
+  ## @param rbac.namespacedRoles Specifies whether the namespaced Roles should be created (in each of the specified additionalNamespaces)
+  ##
+  namespacedRoles: false
+  ## @param rbac.namespacedRolesName Specifies the name for the namespaced Role resource
+  ##
+  namespacedRolesName: "secrets-unsealer"
+  ## @param rbac.labels Extra labels to be added to RBAC resources
+  ##
+  labels: {}
+  ## @param rbac.pspEnabled PodSecurityPolicy
+  ##
+  pspEnabled: false
+
+## @section Metrics parameters
+
+metrics:
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Specify if a ServiceMonitor will be deployed for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace where Prometheus Operator is running
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor
+    ##
+    annotations: {}
+    ## @param metrics.serviceMonitor.interval How frequently to scrape metrics
+    ## e.g:
+    ## interval: 10s
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## e.g:
+    ## scrapeTimeout: 10s
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.honorLabels Specify if ServiceMonitor endPoints will
honor labels + ## + honorLabels: true + ## @param metrics.serviceMonitor.metricRelabelings [array] Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.relabelings [array] Specify general relabeling + ## + relabelings: [] + ## Grafana dashboards configuration + ## + dashboards: + ## @param metrics.dashboards.create Specifies whether a ConfigMap with a Grafana dashboard configuration should be created + ## ref https://github.com/helm/charts/tree/master/stable/grafana#configuration + ## + create: false + ## @param metrics.dashboards.labels Extra labels to be added to the Grafana dashboard ConfigMap + ## + labels: {} + ## @param metrics.dashboards.annotations Annotations to be added to the Grafana dashboard ConfigMap + ## + annotations: {} + ## @param metrics.dashboards.namespace Namespace where Grafana dashboard ConfigMap is deployed + ## + namespace: "" + + ## Sealed Secret Metrics service parameters + ## + service: + ## @param metrics.service.type Sealed Secret Metrics service type + ## + type: ClusterIP + ## @param metrics.service.port Sealed Secret service Metrics HTTP port + ## + port: 8081 + ## @param metrics.service.nodePort Node port for HTTP + ## Specify the nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePort: "" + ## @param metrics.service.annotations [object] Additional custom annotations for Sealed Secret Metrics service + ## + annotations: {} + +## @section PodDisruptionBudget Parameters + +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable The minimum number of pods (non number to omit) + ## + minAvailable: 1 + ## @param pdb.maxUnavailable The maximum number of unavailable pods (non number to omit) + ## + maxUnavailable: "" diff --git a/mkdocs.yml b/mkdocs.yml index 6c385096..83c0323a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -112,6 +112,9 @@ markdown_extensions: line_spans: __span pygments_lang_class: true - pymdownx.inlinehilite + - pymdownx.details + - pymdownx.tabbed: + alternate_style: true - pymdownx.snippets: restrict_base_path: false @@ -140,6 +143,7 @@ nav: - Kubernetes Dashboard: k8s-dashboard.md - Kubernetes Taint: k8s-taint.md - Retrieve kube config: k8s-config.md + - Prometheus: prometheus.md - Storage: - storage-overview.md - Ceph Internal: storage-ceph-rook-internal.md @@ -149,6 +153,7 @@ nav: - Secrets: - vault.md - Vault Operator: vault-secrets-operator.md + - Sealed Secrets: sealed-secrets.md - Infrastructure: - infrastructure-overview.md - Namespace: infrastructure-namespace.md @@ -164,6 +169,7 @@ nav: - infrastructure-ovn.md - OVN Setup: infrastructure-ovn-setup.md - MetalLB: infrastructure-metallb.md + - Gateway API: infrastructure-gateway-api.md - Loki: infrastructure-loki.md - OpenStack: - openstack-overview.md @@ -179,13 +185,19 @@ nav: - skyline: openstack-skyline.md - Octavia: openstack-octavia.md - Gnocchi: openstack-gnocchi.md + - Ceilometer: openstack-ceilometer.md - Monitoring: - Monitoring Overview: prometheus-monitoring-overview.md - - Prometheus: prometheus.md + - Getting Started: monitoring-getting-started.md + - Grafana: grafana.md - MySQL Exporter: prometheus-mysql-exporter.md - RabbitMQ Exporter: prometheus-rabbitmq-exporter.md - Memcached Exporter: prometheus-memcached-exporter.md - - Postgres Exporter: prometheus-openstack-metrics-exporter.md + 
- Postgres Exporter: prometheus-postgres-exporter.md + - Openstack Exporter: prometheus-openstack-metrics-exporter.md + - Alert Manager Examples: + - alertmanager-encore.md + - alertmanager-slack.md - Operational Guide: - Running Genestack Upgrade: genestack-upgrade.md - Running Kubespray Upgrade: k8s-kubespray-upgrade.md @@ -196,6 +208,7 @@ nav: - Generating Clouds YAML: openstack-clouds.md - Keystone Federation to Rackspace: openstack-keystone-federation.md - Nova Flavor Creation: openstack-flavors.md + - Nova CPU Allocation Ratio: openstack-cpu-allocation-ratio.md - Creating Networks: openstack-neutron-networks.md - Glance Images Creation: openstack-glance-images.md - Building Local Images: build-local-images.md @@ -205,3 +218,4 @@ nav: - Cloud Onboarding: - Openstack Security Groups: openstack-security-groups.md - Openstack Floating Ips: openstack-floating-ips.md + - Openstack Servers: openstack-servers.md diff --git a/submodules/nginx-gateway-fabric b/submodules/nginx-gateway-fabric new file mode 160000 index 00000000..4e3d9c4b --- /dev/null +++ b/submodules/nginx-gateway-fabric @@ -0,0 +1 @@ +Subproject commit 4e3d9c4bcc7f65fc2671beffa92ca081644325f6
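A quick way to exercise the new sealed-secrets base added in this change — a minimal sketch, assuming `kustomize`, `helm`, `kubectl`, and `kubeseal` are available locally and `my-secret.yaml` is a placeholder Secret manifest — is to render the overlay the same way the CI job does and then seal a secret against the controller name configured in `values.yaml` (`sealed-secrets-controller` in the `sealed-secrets` namespace):

``` shell
# Render the base locally, mirroring the GitHub Actions kustomize job
kustomize build kustomize/sealed-secrets/base --enable-helm --helm-command helm > /tmp/rendered.yaml

# Apply the rendered manifests to the cluster
kubectl apply -f /tmp/rendered.yaml

# Seal a plain Secret manifest so the result can be committed safely
kubeseal --controller-name sealed-secrets-controller \
         --controller-namespace sealed-secrets \
         --format yaml < my-secret.yaml > my-sealedsecret.yaml
```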