From 8c740c20342404146888feb80e1202bf19c7ea2b Mon Sep 17 00:00:00 2001
From: Kevin Li
Date: Mon, 17 Oct 2022 13:39:30 +0800
Subject: [PATCH 1/2] feat: add faas installation

---
 deployment/configs/configs.yml | 21 +- .../elasticsearch/.helmignore | 23 +- .../elasticsearch/Chart.lock | 9 + .../elasticsearch/Chart.yaml | 34 +- .../elasticsearch/Makefile | 1 - .../elasticsearch/README.md | 1187 ++++++--- .../elasticsearch/charts/common/.helmignore | 22 + .../elasticsearch/charts/common/Chart.yaml | 23 + .../elasticsearch/charts/common/README.md | 347 +++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 139 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 129 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../elasticsearch/charts/common/values.yaml | 5 + .../elasticsearch/charts/kibana/.helmignore | 21 + .../elasticsearch/charts/kibana/Chart.lock | 6 + .../elasticsearch/charts/kibana/Chart.yaml | 29 + .../elasticsearch/charts/kibana/README.md | 419 ++++ .../charts/kibana/charts/common/.helmignore | 22 + .../charts/kibana/charts/common/Chart.yaml | 23 + .../charts/kibana/charts/common/README.md | 347 +++ .../charts/common/templates/_affinities.tpl | 102 + .../charts/common/templates/_capabilities.tpl | 139 ++ .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 75 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../kibana/charts/common/templates/_names.tpl | 63 + .../charts/common/templates/_secrets.tpl | 140 ++ .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../kibana/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../templates/validations/_postgresql.tpl | 129 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../charts/kibana/ci/values-with-es.yaml | 5 + .../charts/kibana/charts/common/values.yaml | 5 + .../charts/kibana/templates/NOTES.txt | 56 + .../charts/kibana/templates/_helpers.tpl | 266 ++ .../charts/kibana/templates/configmap.yaml | 20 + .../charts/kibana/templates/deployment.yaml | 283 +++ .../charts/kibana/templates/extra-list.yaml | 4 + .../charts/kibana/templates/ingress.yaml | 61 + .../kibana/templates/plugins-configmap.yaml | 18 + .../charts/kibana/templates/pvc.yaml | 14 + .../templates/saved-objects-configmap.yaml | 39 + .../charts/kibana/templates/secret.yaml | 29 + .../charts/kibana/templates/service.yaml | 41 + .../kibana/templates/serviceaccount.yaml | 20 + .../kibana/templates/servicemonitor.yaml | 28 + .../charts/kibana/templates/tls-secret.yaml | 72 +
.../elasticsearch/charts/kibana/values.yaml | 577 +++++ .../elasticsearch/ci/ct-values.yaml | 6 + .../elasticsearch/examples/config/Makefile | 21 - .../elasticsearch/examples/config/README.md | 27 - .../examples/config/test/goss.yaml | 29 - .../elasticsearch/examples/config/values.yaml | 32 - .../examples/config/watcher_encryption_key | 1 - .../elasticsearch/examples/default/Makefile | 14 - .../elasticsearch/examples/default/README.md | 25 - .../examples/default/rolling_upgrade.sh | 19 - .../examples/default/test/goss.yaml | 38 - .../examples/docker-for-mac/Makefile | 13 - .../examples/docker-for-mac/README.md | 23 - .../examples/docker-for-mac/values.yaml | 23 - .../examples/kubernetes-kind/Makefile | 17 - .../examples/kubernetes-kind/README.md | 36 - .../kubernetes-kind/values-local-path.yaml | 23 - .../examples/kubernetes-kind/values.yaml | 23 - .../elasticsearch/examples/microk8s/Makefile | 13 - .../elasticsearch/examples/microk8s/README.md | 32 - .../examples/microk8s/values.yaml | 32 - .../elasticsearch/examples/migration/Makefile | 10 - .../examples/migration/README.md | 167 -- .../examples/migration/client.yaml | 23 - .../examples/migration/data.yaml | 17 - .../examples/migration/master.yaml | 26 - .../elasticsearch/examples/minikube/Makefile | 13 - .../elasticsearch/examples/minikube/README.md | 38 - .../examples/minikube/values.yaml | 23 - .../elasticsearch/examples/multi/Makefile | 19 - .../elasticsearch/examples/multi/README.md | 29 - .../elasticsearch/examples/multi/client.yaml | 14 - .../elasticsearch/examples/multi/data.yaml | 11 - .../elasticsearch/examples/multi/master.yaml | 11 - .../examples/multi/test/goss.yaml | 9 - .../examples/networkpolicy/Makefile | 14 - .../examples/networkpolicy/values.yaml | 37 - .../elasticsearch/examples/openshift/Makefile | 13 - .../examples/openshift/README.md | 24 - .../examples/openshift/test/goss.yaml | 16 - .../examples/openshift/values.yaml | 11 - .../elasticsearch/examples/security/Makefile | 38 - .../elasticsearch/examples/security/README.md | 29 - .../examples/security/test/goss.yaml | 44 - .../examples/security/values.yaml | 38 - .../elasticsearch/examples/upgrade/Makefile | 16 - .../elasticsearch/examples/upgrade/README.md | 17 - .../examples/upgrade/scripts/upgrade.sh | 76 - .../examples/upgrade/test/goss.yaml | 16 - .../examples/upgrade/values.yaml | 2 - .../elasticsearch/templates/NOTES.txt | 138 +- .../elasticsearch/templates/_helpers.tpl | 512 +++- .../templates/configmap-curator.yaml | 11 + .../elasticsearch/templates/configmap-es.yaml | 16 + .../templates/configmap-initscripts.yaml | 12 + .../elasticsearch/templates/configmap.yaml | 16 - .../templates/coordinating-hpa.yaml | 35 + .../templates/coordinating-statefulset.yaml | 280 +++ .../templates/coordinating-svc.yaml | 27 + .../elasticsearch/templates/cronjob.yaml | 130 + .../elasticsearch/templates/data-hpa.yaml | 35 + .../templates/data-statefulset.yaml | 326 +++ .../elasticsearch/templates/data-svc.yaml | 20 + .../elasticsearch/templates/extra-list.yaml | 4 + .../templates/hooks/job.install.yaml | 73 + .../templates/ingest-statefulset.yaml | 280 +++ .../elasticsearch/templates/ingest-svc.yaml | 29 + .../elasticsearch/templates/ingress.yaml | 85 +- .../elasticsearch/templates/master-hpa.yaml | 35 + .../templates/master-statefulset.yaml | 329 +++ .../elasticsearch/templates/master-svc.yaml | 27 + .../templates/metrics-deploy.yaml | 121 + .../elasticsearch/templates/metrics-svc.yaml | 17 + .../templates/networkpolicy.yaml | 61 - .../templates/poddisruptionbudget.yaml | 12 - 
.../templates/podsecuritypolicy.yaml | 40 +- .../elasticsearch/templates/role.yaml | 38 +- .../elasticsearch/templates/rolebinding.yaml | 31 +- .../elasticsearch/templates/secrets.yaml | 54 + .../elasticsearch/templates/service.yaml | 77 - .../templates/serviceaccount.yaml | 58 +- .../templates/servicemonitor.yaml | 32 + .../elasticsearch/templates/statefulset.yaml | 378 --- .../test/test-elasticsearch-health.yaml | 36 - .../elasticsearch/templates/tls-secret.yaml | 99 + .../elasticsearch/values.yaml | 2143 ++++++++++++++--- .../middleware_deployment/minio/values.yaml | 4 +- .../middleware_deployment/mongodb/values.yaml | 4 +- .../middleware_deployment/mysql/values.yaml | 4 +- .../templates/pipeline/register-api.yaml | 2 +- .../quanxiang_charts/builder/values.yaml | 2 +- .../faas/templates/configmap.yaml | 6 +- .../faas/templates/kafka.yaml | 2 +- .../quanxiang_charts/faas/values.yaml | 2 + .../fileserver/templates/ingress.yaml | 1 + .../fluent-bit/templates/daemonset.yaml | 6 +- .../{config.yaml => fluent-bit-secret.yaml} | 105 +- .../polyapi/templates/ingress.yaml | 3 + .../polygate/templates/ingress.yaml | 3 + deployment/deployment/schemas/flow.sql | 17 +- deployment/go.mod | 9 +- deployment/go.sum | 60 - deployment/pkg/configMysql.go | 2 +- deployment/pkg/configs.go | 17 +- deployment/pkg/installFaas.go | 192 +- deployment/pkg/modifyValues.go | 19 +- deployment/pkg/startInstall.go | 40 +- deployment/pkg/uninstall.go | 16 +- flow | 2 +- 178 files changed, 11027 insertions(+), 2981 deletions(-) create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/Chart.lock delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/Makefile create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/.helmignore create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/Chart.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/README.md create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_affinities.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_capabilities.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_errors.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_images.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_ingress.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_labels.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_names.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_secrets.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_storage.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_tplvalues.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_utils.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_warnings.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_cassandra.tpl create mode 100644 
deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mariadb.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mongodb.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_postgresql.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_redis.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_validations.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/common/values.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/.helmignore create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.lock create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/README.md create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/.helmignore create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/Chart.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/README.md create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_affinities.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_capabilities.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_errors.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_images.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_ingress.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_labels.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_names.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_secrets.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_storage.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_tplvalues.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_utils.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_warnings.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_cassandra.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mariadb.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mongodb.tpl create mode 100644 
deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_postgresql.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_redis.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_validations.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/values.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/ci/values-with-es.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/NOTES.txt create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/_helpers.tpl create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/configmap.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/deployment.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/extra-list.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/ingress.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/plugins-configmap.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/pvc.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/saved-objects-configmap.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/secret.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/service.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/serviceaccount.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/servicemonitor.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/tls-secret.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/values.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/ci/ct-values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/config/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/config/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/config/test/goss.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/config/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/config/watcher_encryption_key delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/default/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/default/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/default/rolling_upgrade.sh delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/default/test/goss.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/Makefile delete 
mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values-local-path.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/migration/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/migration/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/migration/client.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/migration/data.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/migration/master.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/multi/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/multi/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/multi/client.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/multi/data.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/multi/master.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/multi/test/goss.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/test/goss.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/values.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/security/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/security/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/security/test/goss.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/security/values.yaml delete mode 
100644 deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/Makefile delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/README.md delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/scripts/upgrade.sh delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/test/goss.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/values.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-curator.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-es.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-initscripts.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/configmap.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-hpa.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-statefulset.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-svc.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/cronjob.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/data-hpa.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/data-statefulset.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/data-svc.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/extra-list.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/hooks/job.install.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-statefulset.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-svc.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/master-hpa.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/master-statefulset.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/master-svc.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-deploy.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-svc.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/networkpolicy.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/poddisruptionbudget.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/secrets.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/service.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/servicemonitor.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/statefulset.yaml delete mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/test/test-elasticsearch-health.yaml create mode 100644 deployment/deployment/middleware_deployment/elasticsearch/templates/tls-secret.yaml rename 
deployment/deployment/quanxiang_charts/fluent-bit/templates/{config.yaml => fluent-bit-secret.yaml} (53%) diff --git a/deployment/configs/configs.yml b/deployment/configs/configs.yml index 9a28710..79713ec 100644 --- a/deployment/configs/configs.yml +++ b/deployment/configs/configs.yml @@ -21,7 +21,7 @@ minio: #Service profile (service configuration file) image: repo: docker.io/quanxiang - tag: v1.1.2-rc1 + tag: v1.1.2 imagePullSecrets: "" domain: example.com #domain name for accessing the platform persis: @@ -32,15 +32,18 @@ args: endpoint: "example.com:31198" #fileserver domain; must match the domain set by `domain` ip: "xx.xx.xx.xx" #IP of the k8s master node faas: - git: - known_hosts: "XXX" #ssh-keyscan -p <ssh port> <gitlab domain or ip> |base64 - privatekey: "" #ssh-keygen -f git_rsa -t rsa -N '' -C "git@yunify.com" - gitSSh: "" #http://<gitlab domain or ip>:<ssh port> - token: "" #write the public key into gitlab, generate a token, and put it here + git: + host: "" # access address of the git server, e.g. http://git.quanxiang.dev or http://192.168.0.3 + known_hosts_scan: "" # generated with: ssh-keyscan -p <ssh port> <gitlab domain or ip> |base64 -w 0 + sshPrivatekey: "" # generate an ssh key with ssh-keygen -t rsa -f git_rsa -C "admin@quanxiang.dev", then base64-encode the private key: cat git_rsa|base64 -w 0 + gitSSHAddress: "" # domain or IP of the git server, e.g. git.quanxiang.dev or 192.168.0.3 + gitSSHPort: 22 # ssh port exposed by the git server, e.g. 22 + token: "" # write the public key into gitlab, generate a token, and put it here docker: - server: "" - name: "" - pass: "" + host: "" #address of the docker registry + nameSpace: "" #middle segment of the image path, e.g. quanxiang in [qxcr.xyz]/[quanxiang]/[faas:v1.1.2] + user: "" #a user with push/pull access to this registry namespace + pass: "" #that user's password config: mysql:
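As a sketch of how the `faas.git` values in the hunk above can be produced, following the commands referenced in its comments (the host `git.example.com` and port `22` are placeholders, not values taken from this patch):

```console
# known_hosts_scan: scan the git server's host key and base64-encode it
$ ssh-keyscan -p 22 git.example.com | base64 -w 0

# sshPrivatekey: create a key pair (-N '' skips the passphrase prompt),
# then base64-encode the private key
$ ssh-keygen -t rsa -f git_rsa -N '' -C "admin@quanxiang.dev"
$ cat git_rsa | base64 -w 0
```

The matching public key (`git_rsa.pub`) is then added to GitLab, which is also where the access token for `faas.git.token` is generated.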
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/.helmignore b/deployment/deployment/middleware_deployment/elasticsearch/.helmignore index e12c0b4..f0c1319 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/.helmignore +++ b/deployment/deployment/middleware_deployment/elasticsearch/.helmignore @@ -1,2 +1,21 @@ -tests/ -.pytest_cache/ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/deployment/middleware_deployment/elasticsearch/Chart.lock b/deployment/deployment/middleware_deployment/elasticsearch/Chart.lock new file mode 100644 index 0000000..1e6f72d --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: kibana + repository: https://charts.bitnami.com/bitnami + version: 9.3.15 +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.13.0 +digest: sha256:1d02c4ef6cdafdd3119d5b7b4d95fa62ef8dd4d30c075d56133fff2e231b6449 +generated: "2022-04-03T09:18:01.109969744Z" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/Chart.yaml b/deployment/deployment/middleware_deployment/elasticsearch/Chart.yaml index a6db59e..8de1263 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/Chart.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/Chart.yaml @@ -1,12 +1,28 @@ -apiVersion: v1 -appVersion: 7.15.0 -description: Official Elastic helm chart for Elasticsearch -home: https://github.com/elastic/helm-charts -icon: https://helm.elastic.co/icons/elasticsearch.png +annotations: + category: Analytics +apiVersion: v2 +appVersion: 7.17.2 +dependencies: +- condition: global.kibanaEnabled + name: kibana + repository: https://charts.bitnami.com/bitnami + version: 9.x.x +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Elasticsearch is a distributed search and analytics engine. It is used + for web search, log monitoring, and real-time analytics. Ideal for Big Data applications. +home: https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch +icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-220x234.png +keywords: +- elasticsearch maintainers: -- email: helm-charts@elastic.co - name: Elastic +- email: containers@bitnami.com + name: Bitnami name: elasticsearch sources: -- https://github.com/elastic/elasticsearch -version: 7.15.0 +- https://github.com/bitnami/bitnami-docker-elasticsearch +- https://www.elastic.co/products/elasticsearch +version: 17.9.24 diff --git a/deployment/deployment/middleware_deployment/elasticsearch/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/Makefile deleted file mode 100644 index 22218a1..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../helpers/common.mk diff --git a/deployment/deployment/middleware_deployment/elasticsearch/README.md b/deployment/deployment/middleware_deployment/elasticsearch/README.md index 1b9c7c4..a5582fc 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/README.md +++ b/deployment/deployment/middleware_deployment/elasticsearch/README.md @@ -1,456 +1,857 @@ -# Elasticsearch Helm Chart + -[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+helm-charts+master.svg)](https://devops-ci.elastic.co/job/elastic+helm-charts+master/) [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/elastic)](https://artifacthub.io/packages/search?repo=elastic) +# Bitnami Elasticsearch Stack -This Helm chart is a lightweight way to configure and run our official -[Elasticsearch Docker image][].
+Elasticsearch is a distributed search and analytics engine. It is used for web search, log monitoring, and real-time analytics. Ideal for Big Data applications. - +[Overview of Elasticsearch](https://www.elastic.co/products/elasticsearch) - - +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/elasticsearch +``` -- [Requirements](#requirements) -- [Installing](#installing) - - [Install released version using Helm repository](#install-released-version-using-helm-repository) - - [Install development version from a branch](#install-development-version-from-a-branch) -- [Upgrading](#upgrading) -- [Usage notes](#usage-notes) -- [Configuration](#configuration) - - [Deprecated](#deprecated) -- [FAQ](#faq) - - [How to deploy this chart on a specific K8S distribution?](#how-to-deploy-this-chart-on-a-specific-k8s-distribution) - - [How to deploy dedicated nodes types?](#how-to-deploy-dedicated-nodes-types) - - [Clustering and Node Discovery](#clustering-and-node-discovery) - - [How to deploy clusters with security (authentication and TLS) enabled?](#how-to-deploy-clusters-with-security-authentication-and-tls-enabled) - - [How to migrate from helm/charts stable chart?](#how-to-migrate-from-helmcharts-stable-chart) - - [How to install plugins?](#how-to-install-plugins) - - [How to use the keystore?](#how-to-use-the-keystore) - - [Basic example](#basic-example) - - [Multiple keys](#multiple-keys) - - [Custom paths and keys](#custom-paths-and-keys) - - [How to enable snapshotting?](#how-to-enable-snapshotting) - - [How to configure templates post-deployment?](#how-to-configure-templates-post-deployment) -- [Contributing](#contributing) +## Introduction - - - +This chart bootstraps a [Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. -## Requirements +## Prerequisites -* Kubernetes >= 1.14 -* [Helm][] >= 2.17.0 -* Minimum cluster requirements include the following to run this chart with -default settings. All of these settings are configurable. - * Three Kubernetes nodes to respect the default "hard" affinity settings - * 1GB of RAM for the JVM heap +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure -See [supported configurations][] for more details. +## Installing the Chart -## Installing +To install the chart with the release name `my-release`: -This chart is tested with the latest 7.15.0 version. +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/elasticsearch +``` -### Install released version using Helm repository +These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
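As an aside on the paragraph above: any parameter documented in the tables below can be overridden at install time with `--set`. A minimal sketch, assuming the Bitnami repo has been added as shown; the release name `my-release` and the chosen values are illustrative only, not required settings:

```console
$ helm install my-release bitnami/elasticsearch \
    --set master.replicas=5 \
    --set master.heapSize=256m \
    --set global.kibanaEnabled=true
```

Equivalently, the same overrides can be collected in a YAML file and passed with `-f my-values.yaml`.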
-* Add the Elastic Helm charts repo: -`helm repo add elastic https://helm.elastic.co` +> **Tip**: List all releases using `helm list` -* Install it: - - with Helm 3: `helm install elasticsearch --version elastic/elasticsearch` - - with Helm 2 (deprecated): `helm install --name elasticsearch --version elastic/elasticsearch` +## Uninstalling the Chart -### Install development version from a branch +To uninstall/delete the `my-release` release: -* Clone the git repo: `git clone git@github.com:elastic/helm-charts.git` +```console +$ helm delete my-release +``` -* Checkout the branch : `git checkout 7.15` +The command removes all the Kubernetes components associated with the chart and deletes the release. Also remove the chart using the `--purge` option: -* Install it: - - with Helm 3: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=7.15.0` - - with Helm 2 (deprecated): `helm install --name elasticsearch ./helm-charts/elasticsearch --set imageTag=7.15.0` +```console +$ helm delete --purge my-release +``` +## Parameters + +### Global parameters + +| Name | Description | Value | +| --- | --- | --- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.coordinating.name` | Coordinating name to be used in the Kibana subchart (service name) | `coordinating-only` | +| `global.kibanaEnabled` | Whether or not to enable Kibana | `false` | + + +### Common parameters + +| Name | Description | Value | +| --- | --- | --- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### Elasticsearch parameters + +| Name | Description | Value | +| --- | --- | --- | +| `image.registry` | Elasticsearch image registry | `docker.io` | +| `image.repository` | Elasticsearch image repository | `bitnami/elasticsearch` | +| `image.tag` | Elasticsearch image tag (immutable tags are recommended) | `7.16.3-debian-10-r0` | +| `image.pullPolicy` | Elasticsearch image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Elasticsearch image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | +| `security.enabled` | Enable X-Pack Security settings | `false` | +| `security.elasticPassword` | Password for 'elastic' user | `""` | +| `security.existingSecret` | Name of the existing secret containing the Elasticsearch password | `""` | +| 
`security.fipsMode` | Configure elasticsearch with FIPS 140 compliant mode | `false` | +| `security.tls.restEncryption` | Enable SSL/TLS encryption for Elasticsearch REST API. | `true` | +| `security.tls.autoGenerated` | Create self-signed TLS certificates. | `false` | +| `security.tls.verificationMode` | Verification mode for SSL communications. | `full` | +| `security.tls.master.existingSecret` | Existing secret containing the certificates for the master nodes | `""` | +| `security.tls.data.existingSecret` | Existing secret containing the certificates for the data nodes | `""` | +| `security.tls.ingest.existingSecret` | Existing secret containing the certificates for the ingest nodes | `""` | +| `security.tls.coordinating.existingSecret` | Existing secret containing the certificates for the coordinating nodes | `""` | +| `security.tls.keystorePassword` | Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. | `""` | +| `security.tls.truststorePassword` | Password to access the JKS/PKCS12 truststore when they are password-protected. | `""` | +| `security.tls.keystoreFilename` | Name of the keystore file | `elasticsearch.keystore.jks` | +| `security.tls.truststoreFilename` | Name of the truststore | `elasticsearch.truststore.jks` | +| `security.tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 | `false` | +| `security.tls.keyPassword` | Password to access the PEM key when they are password-protected. | `""` | +| `name` | Elasticsearch cluster name | `elastic` | +| `plugins` | Comma, semi-colon or space separated list of plugins to install at initialization | `""` | +| `snapshotRepoPath` | File System snapshot repository path | `""` | +| `config` | Override elasticsearch configuration | `{}` | +| `extraConfig` | Append extra configuration to the elasticsearch node configuration | `{}` | +| `extraVolumes` | A list of volumes to be added to the pod | `[]` | +| `extraVolumeMounts` | A list of volume mounts to be added to the pod | `[]` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` | +| `extraEnvVars` | Array containing extra env vars to be added to all pods (evaluated as a template) | `[]` | +| `extraEnvVarsConfigMap` | ConfigMap containing extra env vars to be added to all pods (evaluated as a template) | `""` | +| `extraEnvVarsSecret` | Secret containing extra env vars to be added to all pods (evaluated as a template) | `""` | + + +### Master parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `master.name` | Master-eligible node pod name | `master` | +| `master.fullnameOverride` | String to fully override elasticsearch.master.fullname template with a string | `""` | +| `master.replicas` | Desired number of Elasticsearch master-eligible nodes. Consider using an odd number of master nodes to prevent "split brain" situation. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html | `3` | +| `master.updateStrategy.type` | Update strategy for Master statefulset | `RollingUpdate` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `master.heapSize` | Master-eligible node heap size | `128m` | +| `master.podAnnotations` | Annotations for master-eligible pods. | `{}` | +| `master.podLabels` | Extra labels to add to Pod | `{}` | +| `master.securityContext.enabled` | Enable security context for master-eligible pods | `true` | +| `master.securityContext.fsGroup` | Group ID for the container for master-eligible pods | `1001` | +| `master.securityContext.runAsUser` | User ID for the container for master-eligible pods | `1001` | +| `master.podSecurityContext.enabled` | Enable security context for master-eligible pods | `false` | +| `master.podSecurityContext.fsGroup` | Group ID for the container for master-eligible pods | `1001` | +| `master.containerSecurityContext.enabled` | Enable security context for master-eligible pods | `false` | +| `master.containerSecurityContext.runAsUser` | User ID for the container for master-eligible pods | `1001` | +| `master.podAffinityPreset` | Master-eligible Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Master-eligible Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.type` | Master-eligible Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Master-eligible Node label key to match Ignored if `affinity` is set. | `""` | +| `master.nodeAffinityPreset.values` | Master-eligible Node label values to match. Ignored if `affinity` is set. | `[]` | +| `master.affinity` | Master-eligible Affinity for pod assignment | `{}` | +| `master.priorityClassName` | Master pods Priority Class Name | `""` | +| `master.nodeSelector` | Master-eligible Node labels for pod assignment | `{}` | +| `master.tolerations` | Master-eligible Tolerations for pod assignment | `[]` | +| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `master.resources.limits` | The resources limits for the container | `{}` | +| `master.resources.requests` | The requested resources for the container | `{}` | +| `master.startupProbe.enabled` | Enable/disable the startup probe (master nodes pod) | `false` | +| `master.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (master nodes pod) | `90` | +| `master.startupProbe.periodSeconds` | How often to perform the probe (master nodes pod) | `10` | +| `master.startupProbe.timeoutSeconds` | When the probe times out (master nodes pod) | `5` | +| `master.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) | `1` | +| `master.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.customStartupProbe` | Override default startup probe | `{}` | +| `master.customLivenessProbe` | Override default liveness probe | `{}` | +| `master.customReadinessProbe` | Override default readiness probe | `{}` | +| `master.initContainers` | Extra init containers to add to the Elasticsearch master-eligible pod(s) | `[]` | +| `master.sidecars` | Extra sidecar containers to add to the Elasticsearch master-eligible pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `master.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `master.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `master.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. | `""` | +| `master.persistence.selector` | Configure custom selector for existing Persistent Volume. 
Overwrites `master.persistence.existingVolume` | `{}` | +| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume Size | `8Gi` | +| `master.service.type` | Kubernetes Service type (master-eligible nodes) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) | `9300` | +| `master.service.nodePort` | Kubernetes Service nodePort (master-eligible nodes) | `""` | +| `master.service.annotations` | Annotations for master-eligible nodes service | `{}` | +| `master.service.loadBalancerIP` | loadBalancerIP if master-eligible nodes service type is `LoadBalancer` | `""` | +| `master.ingress.enabled` | Enable ingress controller resource | `false` | +| `master.ingress.pathType` | Ingress Path type | `ImplementationSpecific` | +| `master.ingress.apiVersion` | Override API Version (automatically detected if not set) | `""` | +| `master.ingress.hostname` | Default host for the ingress resource. If specified as "*" no host rule is configured | `master.local` | +| `master.ingress.path` | The Path to Master. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `master.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `master.ingress.tls` | Enable TLS configuration for the hostname defined at master.ingress.hostname parameter | `false` | +| `master.ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `master.ingress.extraPaths` | Additional arbitrary path/backend objects | `[]` | +| `master.ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. 
| `[]` | +| `master.ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `master.serviceAccount.create` | Enable creation of ServiceAccount for the master node | `false` | +| `master.serviceAccount.name` | Name of the created serviceAccount | `""` | +| `master.autoscaling.enabled` | Enable autoscaling for master replicas | `false` | +| `master.autoscaling.minReplicas` | Minimum number of master replicas | `2` | +| `master.autoscaling.maxReplicas` | Maximum number of master replicas | `11` | +| `master.autoscaling.targetCPU` | Target CPU utilization percentage for master replica autoscaling | `""` | +| `master.autoscaling.targetMemory` | Target Memory utilization percentage for master replica autoscaling | `""` | + + +### Coordinating parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `coordinating.fullnameOverride` | String to fully override elasticsearch.coordinating.fullname template with a string | `""` | +| `coordinating.replicas` | Desired number of Elasticsearch coordinating-only nodes | `2` | +| `coordinating.hostAliases` | Add deployment host aliases | `[]` | +| `coordinating.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `coordinating.updateStrategy.type` | Update strategy for Coordinating Statefulset | `RollingUpdate` | +| `coordinating.heapSize` | Coordinating-only node heap size | `128m` | +| `coordinating.podAnnotations` | Annotations for coordinating pods. | `{}` | +| `coordinating.podLabels` | Extra labels to add to Pod | `{}` | +| `coordinating.securityContext.enabled` | Enable security context for coordinating-only pods | `true` | +| `coordinating.securityContext.fsGroup` | Group ID for the container for coordinating-only pods | `1001` | +| `coordinating.securityContext.runAsUser` | User ID for the container for coordinating-only pods | `1001` | +| `coordinating.podSecurityContext.enabled` | Enable security context for coordinating-only pods | `false` | +| `coordinating.podSecurityContext.fsGroup` | Group ID for the container for coordinating-only pods | `1001` | +| `coordinating.containerSecurityContext.enabled` | Enable security context for coordinating-only pods | `false` | +| `coordinating.containerSecurityContext.runAsUser` | User ID for the container for coordinating-only pods | `1001` | +| `coordinating.podAffinityPreset` | Coordinating Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.podAntiAffinityPreset` | Coordinating Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.type` | Coordinating Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.key` | Coordinating Node label key to match Ignored if `affinity` is set. | `""` | +| `coordinating.nodeAffinityPreset.values` | Coordinating Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `coordinating.affinity` | Coordinating Affinity for pod assignment | `{}` | +| `coordinating.priorityClassName` | Coordinating pods Priority Class Name | `""` | +| `coordinating.nodeSelector` | Coordinating Node labels for pod assignment | `{}` | +| `coordinating.tolerations` | Coordinating Tolerations for pod assignment | `[]` | +| `coordinating.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `coordinating.resources.limits` | The resources limits for the container | `{}` | +| `coordinating.resources.requests` | The requested resources for the container | `{}` | +| `coordinating.startupProbe.enabled` | Enable/disable the startup probe (coordinating nodes pod) | `false` | +| `coordinating.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (coordinating nodes pod) | `90` | +| `coordinating.startupProbe.periodSeconds` | How often to perform the probe (coordinating nodes pod) | `10` | +| `coordinating.startupProbe.timeoutSeconds` | When the probe times out (coordinating nodes pod) | `5` | +| `coordinating.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating nodes pod) | `1` | +| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` | +| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` | +| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.customStartupProbe` | Override default startup probe | `{}` | +| `coordinating.customLivenessProbe` | Override default liveness probe | `{}` | +| `coordinating.customReadinessProbe` | Override default readiness probe | `{}` | +| `coordinating.initContainers` | Extra init containers to add to the Elasticsearch coordinating-only pod(s) | `[]` | +| `coordinating.sidecars` | Extra sidecar containers to add to the Elasticsearch 
coordinating-only pod(s) | `[]` | +| `coordinating.service.type` | Kubernetes Service type (coordinating-only nodes) | `ClusterIP` | +| `coordinating.service.port` | Kubernetes Service port for REST API (coordinating-only nodes) | `9200` | +| `coordinating.service.nodePort` | Kubernetes Service nodePort (coordinating-only nodes) | `""` | +| `coordinating.service.annotations` | Annotations for coordinating-only nodes service | `{}` | +| `coordinating.service.loadBalancerIP` | loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` | `""` | +| `coordinating.service.externalTrafficPolicy` | Enable client source IP preservation with externalTrafficPolicy: Local | `Cluster` | +| `coordinating.serviceAccount.create` | Enable creation of ServiceAccount for the coordinating-only node | `false` | +| `coordinating.serviceAccount.name` | Name of the created serviceAccount | `""` | +| `coordinating.autoscaling.enabled` | Enable autoscaling for coordinating replicas | `false` | +| `coordinating.autoscaling.minReplicas` | Minimum number of coordinating replicas | `2` | +| `coordinating.autoscaling.maxReplicas` | Maximum number of coordinating replicas | `11` | +| `coordinating.autoscaling.targetCPU` | Target CPU utilization percentage for coordinating replica autoscaling | `""` | +| `coordinating.autoscaling.targetMemory` | Target Memory utilization percentage for coordinating replica autoscaling | `""` | + + +### Data parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `data.name` | Data node pod name | `data` | +| `data.fullnameOverride` | String to fully override elasticsearch.data.fullname template with a string | `""` | +| `data.replicas` | Desired number of Elasticsearch data nodes | `2` | +| `data.hostAliases` | Add deployment host aliases | `[]` | +| `data.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `data.updateStrategy.type` | Update strategy for Data statefulset | `RollingUpdate` | +| `data.updateStrategy.rollingUpdatePartition` | Partition update strategy for Data statefulset | `""` | +| `data.heapSize` | Data node heap size | `1024m` | +| `data.podAnnotations` | Annotations for data pods. | `{}` | +| `data.podLabels` | Extra labels to add to Pod | `{}` | +| `data.securityContext.enabled` | Enable security context for data pods | `true` | +| `data.securityContext.fsGroup` | Group ID for the container for data pods | `1001` | +| `data.securityContext.runAsUser` | User ID for the container for data pods | `1001` | +| `data.podSecurityContext.enabled` | Enable security context for data pods | `true` | +| `data.podSecurityContext.fsGroup` | Group ID for the container for data pods | `1001` | +| `data.containerSecurityContext.enabled` | Enable security context for data pods | `true` | +| `data.containerSecurityContext.runAsUser` | User ID for the container for data pods | `1001` | +| `data.podAffinityPreset` | Data Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.podAntiAffinityPreset` | Data Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.type` | Data Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.key` | Data Node label key to match Ignored if `affinity` is set. | `""` | +| `data.nodeAffinityPreset.values` | Data Node label values to match. Ignored if `affinity` is set. | `[]` | +| `data.affinity` | Data Affinity for pod assignment | `{}` | +| `data.priorityClassName` | Data pods Priority Class Name | `""` | +| `data.nodeSelector` | Data Node labels for pod assignment | `{}` | +| `data.tolerations` | Data Tolerations for pod assignment | `[]` | +| `data.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `data.resources.limits` | The resources limits for the container | `{}` | +| `data.resources.requests` | The requested resources for the container | `{}` | +| `data.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` | +| `data.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `90` | +| `data.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` | +| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` | +| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` | +| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` | +| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.customStartupProbe` | Override default startup probe | `{}` | +| `data.customLivenessProbe` | Override default liveness probe | `{}` | +| `data.customReadinessProbe` | Override default readiness probe | `{}` | +| `data.initContainers` | Extra init containers to add to the Elasticsearch data pod(s) | `[]` | +| `data.sidecars` | Extra sidecar containers to add to the Elasticsearch data pod(s) | `[]` | +| `data.service.annotations` | Annotations for data-eligible nodes service | `{}` | +| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| 
`data.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `data.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `data.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` ist set. | `""` | +| `data.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` | `{}` | +| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `data.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `data.persistence.size` | Persistent Volume Size | `8Gi` | +| `data.serviceAccount.create` | Enable creation of ServiceAccount for the data node | `false` | +| `data.serviceAccount.name` | Name of the created serviceAccount | `""` | +| `data.autoscaling.enabled` | Enable autoscaling for data replicas | `false` | +| `data.autoscaling.minReplicas` | Minimum number of data replicas | `2` | +| `data.autoscaling.maxReplicas` | Maximum number of data replicas | `11` | +| `data.autoscaling.targetCPU` | Target CPU utilization percentage for data replica autoscaling | `""` | +| `data.autoscaling.targetMemory` | Target Memory utilization percentage for data replica autoscaling | `""` | + + +### Ingest parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `ingest.enabled` | Enable ingest nodes | `false` | +| `ingest.name` | Ingest node pod name | `ingest` | +| `ingest.fullnameOverride` | String to fully override elasticsearch.ingest.fullname template with a string | `""` | +| `ingest.replicas` | Desired number of Elasticsearch ingest nodes | `2` | +| `ingest.updateStrategy.type` | Update strategy for Ingest statefulset | `RollingUpdate` | +| `ingest.heapSize` | Ingest node heap size | `128m` | +| `ingest.podAnnotations` | Annotations for ingest pods. | `{}` | +| `ingest.hostAliases` | Add deployment host aliases | `[]` | +| `ingest.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `ingest.podLabels` | Extra labels to add to Pod | `{}` | +| `ingest.securityContext.enabled` | Enable security context for ingest pods | `true` | +| `ingest.securityContext.fsGroup` | Group ID for the container for ingest pods | `1001` | +| `ingest.securityContext.runAsUser` | User ID for the container for ingest pods | `1001` | +| `ingest.podSecurityContext.enabled` | Enable security context for ingest pods | `true` | +| `ingest.podSecurityContext.fsGroup` | Group ID for the container for ingest pods | `1001` | +| `ingest.containerSecurityContext.enabled` | Enable security context for ingest pods | `true` | +| `ingest.containerSecurityContext.runAsUser` | User ID for the container for ingest pods | `1001` | +| `ingest.podAffinityPreset` | Ingest Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.podAntiAffinityPreset` | Ingest Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.type` | Ingest Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.key` | Ingest Node label key to match Ignored if `affinity` is set. 
| `""` | +| `ingest.nodeAffinityPreset.values` | Ingest Node label values to match. Ignored if `affinity` is set. | `[]` | +| `ingest.affinity` | Ingest Affinity for pod assignment | `{}` | +| `ingest.priorityClassName` | Ingest pods Priority Class Name | `""` | +| `ingest.nodeSelector` | Ingest Node labels for pod assignment | `{}` | +| `ingest.tolerations` | Ingest Tolerations for pod assignment | `[]` | +| `ingest.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `ingest.resources.limits` | The resources limits for the container | `{}` | +| `ingest.resources.requests` | The requested resources for the container | `{}` | +| `ingest.startupProbe.enabled` | Enable/disable the startup probe (ingest nodes pod) | `false` | +| `ingest.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (ingest nodes pod) | `90` | +| `ingest.startupProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` | +| `ingest.startupProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` | +| `ingest.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` | +| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest nodes pod) | `true` | +| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest nodes pod) | `90` | +| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` | +| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` | +| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` | +| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest nodes pod) | `true` | +| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest nodes pod) | `90` | +| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` | +| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` | +| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` | +| `ingest.customStartupProbe` | Override default startup probe | `{}` | +| `ingest.customLivenessProbe` | Override default liveness probe | `{}` | +| `ingest.customReadinessProbe` | Override default readiness probe | `{}` | +| `ingest.initContainers` | Extra init containers to add to the Elasticsearch ingest pod(s) | `[]` | +| `ingest.sidecars` | Extra sidecar containers to add to the Elasticsearch ingest pod(s) | `[]` | +| `ingest.service.type` | Kubernetes Service type (ingest nodes) | `ClusterIP` | +| `ingest.service.port` | Kubernetes Service port Elasticsearch transport port (ingest nodes) | `9300` | +| `ingest.service.nodePort` | 
Kubernetes Service nodePort (ingest nodes) | `""` |
+| `ingest.service.annotations` | Annotations for ingest nodes service | `{}` |
+| `ingest.service.loadBalancerIP` | loadBalancerIP if ingest nodes service type is `LoadBalancer` | `""` |
+| `ingest.serviceAccount.create` | Enable creation of a ServiceAccount for the ingest nodes | `false` |
+| `ingest.serviceAccount.name` | Name of the created serviceAccount | `""` |
+
+
+### Curator parameters
+
+| Name | Description | Value |
+| -------------------------------------------- | ----------------------------------------------------------------------------------------------- | ------------------------------- |
+| `curator.enabled` | Enable Elasticsearch Curator cron job | `false` |
+| `curator.name` | Elasticsearch Curator pod name | `curator` |
+| `curator.image.registry` | Elasticsearch Curator image registry | `docker.io` |
+| `curator.image.repository` | Elasticsearch Curator image repository | `bitnami/elasticsearch-curator` |
+| `curator.image.tag` | Elasticsearch Curator image tag | `5.8.4-debian-10-r242` |
+| `curator.image.pullPolicy` | Elasticsearch Curator image pull policy | `IfNotPresent` |
+| `curator.image.pullSecrets` | Elasticsearch Curator image pull secrets | `[]` |
+| `curator.cronjob.schedule` | Schedule for the CronJob | `0 1 * * *` |
+| `curator.cronjob.annotations` | Annotations to add to the cronjob | `{}` |
+| `curator.cronjob.concurrencyPolicy` | `Allow,Forbid,Replace` concurrent jobs | `""` |
+| `curator.cronjob.failedJobsHistoryLimit` | Specify the number of failed Jobs to keep | `""` |
+| `curator.cronjob.successfulJobsHistoryLimit` | Specify the number of completed Jobs to keep | `""` |
+| `curator.cronjob.jobRestartPolicy` | Control the Job restartPolicy | `Never` |
+| `curator.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
+| `curator.podAnnotations` | Annotations to add to the pod | `{}` |
+| `curator.podLabels` | Extra labels to add to Pod | `{}` |
+| `curator.podAffinityPreset` | Curator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `curator.podAntiAffinityPreset` | Curator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `curator.nodeAffinityPreset.type` | Curator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `curator.nodeAffinityPreset.key` | Curator Node label key to match. Ignored if `affinity` is set. | `""` |
+| `curator.nodeAffinityPreset.values` | Curator Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `curator.initContainers` | Extra init containers to add to the Elasticsearch Curator pod(s) | `[]` |
+| `curator.sidecars` | Extra sidecar containers to add to the Elasticsearch Curator pod(s) | `[]` |
+| `curator.affinity` | Curator Affinity for pod assignment | `{}` |
+| `curator.nodeSelector` | Curator Node labels for pod assignment | `{}` |
+| `curator.tolerations` | Curator Tolerations for pod assignment | `[]` |
+| `curator.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains.
Evaluated as a template | `[]` | +| `curator.rbac.enabled` | Enable RBAC resources | `false` | +| `curator.serviceAccount.create` | Create a default serviceaccount for elasticsearch curator | `true` | +| `curator.serviceAccount.name` | Name for elasticsearch curator serviceaccount | `""` | +| `curator.psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `curator.hooks` | Whether to run job on selected hooks | `{}` | +| `curator.dryrun` | Run Curator in dry-run mode | `false` | +| `curator.command` | Command to execute | `["curator"]` | +| `curator.env` | Environment variables to add to the cronjob container | `{}` | +| `curator.configMaps.action_file_yml` | Contents of the Curator action_file.yml | `""` | +| `curator.configMaps.config_yml` | Contents of the Curator config.yml (overrides config) | `""` | +| `curator.resources.limits` | The resources limits for the container | `{}` | +| `curator.resources.requests` | The requested resources for the container | `{}` | +| `curator.priorityClassName` | Curator Pods Priority Class Name | `""` | +| `curator.extraVolumes` | Extra volumes | `[]` | +| `curator.extraVolumeMounts` | Mount extra volume(s) | `[]` | +| `curator.extraInitContainers` | DEPRECATED. Use `curator.initContainers` instead. Init containers to add to the cronjob container | `[]` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | +| `metrics.enabled` | Enable prometheus exporter | `false` | +| `metrics.name` | Metrics pod name | `metrics` | +| `metrics.image.registry` | Metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` | +| `metrics.image.tag` | Metrics exporter image tag | `1.3.0-debian-10-r83` | +| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Metrics exporter image pull secrets | `[]` | +| `metrics.extraArgs` | Extra arguments to add to the default exporter command | `[]` | +| `metrics.hostAliases` | Add deployment host aliases | `[]` | +| `metrics.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` | +| `metrics.service.annotations` | Provide any additional annotations which may be required. | `{}` | +| `metrics.podAffinityPreset` | Metrics Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.podAntiAffinityPreset` | Metrics Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.type` | Metrics Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.key` | Metrics Node label key to match Ignored if `affinity` is set. | `""` | +| `metrics.nodeAffinityPreset.values` | Metrics Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `metrics.affinity` | Metrics Affinity for pod assignment | `{}` | +| `metrics.nodeSelector` | Metrics Node labels for pod assignment | `{}` | +| `metrics.tolerations` | Metrics Tolerations for pod assignment | `[]` | +| `metrics.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `metrics.resources.limits` | The resources limits for the container | `{}` | +| `metrics.resources.requests` | The requested resources for the container | `{}` | +| `metrics.livenessProbe.enabled` | Enable/disable the liveness probe (metrics pod) | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (metrics pod) | `60` | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `5` | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.readinessProbe.enabled` | Enable/disable the readiness probe (metrics pod) | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (metrics pod) | `5` | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.podAnnotations` | Metrics exporter pod Annotation and Labels | `{}` | +| `metrics.podLabels` | Extra labels to add to Pod | `{}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + + +### Sysctl Image parameters + +| Name | Description | Value | +| -------------------------------- | ------------------------------------------- | ----------------------- | +| `sysctlImage.enabled` | Enable kernel settings modifier image | `true` | +| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` | +| `sysctlImage.repository` | Kernel settings modifier image repository | `bitnami/bitnami-shell` | +| `sysctlImage.tag` | Kernel settings modifier image tag | `10-debian-10-r312` | +| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `IfNotPresent` | +| `sysctlImage.pullSecrets` | Kernel settings modifier image pull secrets | `[]` | +| `sysctlImage.resources.limits` | The resources limits for the container | `{}` | +| `sysctlImage.resources.requests` | The requested resources for the container | `{}` | + + +### VolumePermissions parameters + +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r312` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the container | `{}` | + + +### Kibana Parameters + +| Name | Description | Value | +| ---------------------------- | ------------------------------------------------------------------------- | ------ | +| `kibana.elasticsearch.hosts` | Array containing hostnames for the ES instances. Used to generate the URL | `[]` | +| `kibana.elasticsearch.port` | Port to connect Kibana and ES instance. Used to generate the URL | `9200` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set name=my-elastic,client.service.port=8080 \ + bitnami/elasticsearch +``` -## Upgrading +The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`. -Please always check [CHANGELOG.md][] and [BREAKING_CHANGES.md][] before -upgrading to a new chart version. - - -## Usage notes - -* This repo includes a number of [examples][] configurations which can be used -as a reference. They are also used in the automated testing of this chart. -* Automated testing of this chart is currently only run against GKE (Google -Kubernetes Engine). -* The chart deploys a StatefulSet and by default will do an automated rolling -update of your cluster. 
It does this by waiting for the cluster health to become
-green after each instance is updated. If you prefer to update manually you can
-set `OnDelete` [updateStrategy][].
-* It is important to verify the JVM heap size in `esJavaOpts` and to set
-the CPU/Memory `resources` to something suitable for your cluster.
-* To simplify the chart and its maintenance, each set of node groups is deployed as a
-separate Helm release. Take a look at the [multi][] example to get an idea for
-how this works. Without doing this it isn't possible to resize persistent
-volumes in a StatefulSet. By setting it up this way it makes it possible to add
-more nodes with a new storage size, then drain the old ones. It also solves the
-problem of allowing the user to determine which node groups to update first when
-doing upgrades or changes.
-* We have designed this chart to be very un-opinionated about how to configure
-Elasticsearch. It exposes ways to set environment variables and mount secrets
-inside of the container. Doing this makes it much easier for this chart to
-support multiple versions with minimal changes.
-
-
-## Configuration
-
-| Parameter | Description | Default |
-|------------------------------------|--------------------------------------------------------------------------------------------|--------------------------------------------------|
-| `antiAffinityTopologyKey` | The [anti-affinity][] topology key. By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` |
-| `antiAffinity` | Setting this to hard enforces the [anti-affinity][] rules. If it is set to soft it will be done "best effort". Other values will be ignored | `hard` |
-| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params][] that will be used by readiness [probe][] command | `wait_for_status=green&timeout=1s` |
-| `clusterName` | This will be used as the Elasticsearch [cluster.name][] and should be unique per cluster in the namespace | `elasticsearch` |
-| `enableServiceLinks` | Set to false to disable service links, which can cause slow pod startup times when there are many services in the current namespace. | `true` |
-| `envFrom` | Templatable string to be passed to the [environment from variables][] which will be appended to the `envFrom:` definition for the container | `[]` |
-| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml][] for an example of the formatting | `{}` |
-| `esJavaOpts` | [Java options][] for Elasticsearch. This is where you could configure the [jvm heap size][] | `""` |
-| `esMajorVersion` | Deprecated. Instead, use the version of the chart corresponding to your ES minor version. Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g.
`esMajorVersion: 6`) | `""` | -| `extraContainers` | Templatable string of additional `containers` to be passed to the `tpl` function | `""` | -| `extraEnvs` | Extra [environment variables][] which will be appended to the `env:` definition for the container | `[]` | -| `extraInitContainers` | Templatable string of additional `initContainers` to be passed to the `tpl` function | `""` | -| `extraVolumeMounts` | Templatable string of additional `volumeMounts` to be passed to the `tpl` function | `""` | -| `extraVolumes` | Templatable string of additional `volumes` to be passed to the `tpl` function | `""` | -| `fullnameOverride` | Overrides the `clusterName` and `nodeGroup` when used in the naming of resources. This should only be used when using a single `nodeGroup`, otherwise you will have name conflicts | `""` | -| `healthNameOverride` | Overrides `test-elasticsearch-health` pod name | `""` | -| `hostAliases` | Configurable [hostAliases][] | `[]` | -| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port][] in `extraEnvs` | `9200` | -| `imagePullPolicy` | The Kubernetes [imagePullPolicy][] value | `IfNotPresent` | -| `imagePullSecrets` | Configuration for [imagePullSecrets][] so that you can use a private registry for your image | `[]` | -| `imageTag` | The Elasticsearch Docker image tag | `7.15.0` | -| `image` | The Elasticsearch Docker image | `docker.elastic.co/elasticsearch/elasticsearch` | -| `ingress` | Configurable [ingress][] to expose the Elasticsearch service. See [values.yaml][] for an example | see [values.yaml][] | -| `initResources` | Allows you to set the [resources][] for the `initContainer` in the StatefulSet | `{}` | -| `keystore` | Allows you map Kubernetes secrets into the keystore. See the [config example][] and [how to use the keystore][] | `[]` | -| `labels` | Configurable [labels][] applied to all Elasticsearch pods | `{}` | -| `lifecycle` | Allows you to add [lifecycle hooks][]. See [values.yaml][] for an example of the formatting | `{}` | -| `masterService` | The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery][] for more information | `""` | -| `maxUnavailable` | The [maxUnavailable][] value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` | -| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes][]. Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7 | `2` | -| `nameOverride` | Overrides the `clusterName` when used in the naming of resources | `""` | -| `networkHost` | Value for the [network.host Elasticsearch setting][] | `0.0.0.0` | -| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to set. See [`values.yaml`](values.yaml) for an example | `{http.enabled: false,transport.enabled: false}` | -| `nodeAffinity` | Value for the [node affinity settings][] | `{}` | -| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. 
The name will be `clusterName-nodeGroup-X` , `nameOverride-nodeGroup-X` if a `nameOverride` is specified, and `fullnameOverride-X` if a `fullnameOverride` is specified | `master` | -| `nodeSelector` | Configurable [nodeSelector][] so that you can target specific nodes for your Elasticsearch cluster | `{}` | -| `persistence` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles][] which don't require persistent data | see [values.yaml][] | -| `podAnnotations` | Configurable [annotations][] applied to all Elasticsearch pods | `{}` | -| `podManagementPolicy` | By default Kubernetes [deploys StatefulSets serially][]. This deploys them in parallel so that they can discover each other | `Parallel` | -| `podSecurityContext` | Allows you to set the [securityContext][] for the pod | see [values.yaml][] | -| `podSecurityPolicy` | Configuration for create a pod security policy with minimal permissions to run this Helm chart with `create: true`. Also can be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | see [values.yaml][] | -| `priorityClassName` | The name of the [PriorityClass][]. No default is supplied as the PriorityClass must be created first | `""` | -| `protocol` | The protocol that will be used for the readiness [probe][]. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` | -| `rbac` | Configuration for creating a role, role binding and ServiceAccount as part of this Helm chart with `create: true`. Also can be used to reference an external ServiceAccount with `serviceAccountName: "externalServiceAccountName"` | see [values.yaml][] | -| `readinessProbe` | Configuration fields for the readiness [probe][] | see [values.yaml][] | -| `replicas` | Kubernetes replica count for the StatefulSet (i.e. how many pods) | `3` | -| `resources` | Allows you to set the [resources][] for the StatefulSet | see [values.yaml][] | -| `roles` | A hash map with the specific [roles][] for the `nodeGroup` | see [values.yaml][] | -| `schedulerName` | Name of the [alternate scheduler][] | `""` | -| `secretMounts` | Allows you easily mount a secret as a file inside the StatefulSet. Useful for mounting certificates and other secrets. See [values.yaml][] for an example | `[]` | -| `securityContext` | Allows you to set the [securityContext][] for the container | see [values.yaml][] | -| `service.annotations` | [LoadBalancer annotations][] that Kubernetes will use for the service. This will configure load balancer if `service.type` is `LoadBalancer` | `{}` | -| `service.enabled` | Enable non-headless service | `true` | -| `service.externalTrafficPolicy` | Some cloud providers allow you to specify the [LoadBalancer externalTrafficPolicy][]. Kubernetes will use this to preserve the client source IP. This will configure load balancer if `service.type` is `LoadBalancer` | `""` | -| `service.httpPortName` | The name of the http port within the service | `http` | -| `service.labelsHeadless` | Labels to be added to headless service | `{}` | -| `service.labels` | Labels to be added to non-headless service | `{}` | -| `service.loadBalancerIP` | Some cloud providers allow you to specify the [loadBalancer][] IP. If the `loadBalancerIP` field is not specified, the IP is dynamically assigned. If you specify a `loadBalancerIP` but your cloud provider does not support the feature, it is ignored. 
| `""` | -| `service.loadBalancerSourceRanges` | The IP ranges that are allowed to access | `[]` | -| `service.nodePort` | Custom [nodePort][] port that can be set if you are using `service.type: nodePort` | `""` | -| `service.transportPortName` | The name of the transport port within the service | `transport` | -| `service.type` | Elasticsearch [Service Types][] | `ClusterIP` | -| `sysctlInitContainer` | Allows you to disable the `sysctlInitContainer` if you are setting [sysctl vm.max_map_count][] with another method | `enabled: true` | -| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count][] needed for Elasticsearch | `262144` | -| `terminationGracePeriod` | The [terminationGracePeriod][] in seconds used when trying to stop the pod | `120` | -| `tests.enabled` | Enable creating test related resources when running `helm template` or `helm test` | `true` | -| `tolerations` | Configurable [tolerations][] | `[]` | -| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration][] in `extraEnvs` | `9300` | -| `updateStrategy` | The [updateStrategy][] for the StatefulSet. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` | -| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for StatefulSets][]. You will want to adjust the storage (default `30Gi` ) and the `storageClassName` if you are using a different storage class | see [values.yaml][] | - -### Deprecated - -| Parameter | Description | Default | -|-----------|---------------------------------------------------------------------------------------------------------------|---------| -| `fsGroup` | The Group ID (GID) for [securityContext][] so that the Elasticsearch user can read from the persistent volume | `""` | - - -## FAQ - -### How to deploy this chart on a specific K8S distribution? - -This chart is designed to run on production scale Kubernetes clusters with -multiple nodes, lots of memory and persistent storage. For that reason it can be -a bit tricky to run them against local Kubernetes environments such as -[Minikube][]. - -This chart is highly tested with [GKE][], but some K8S distribution also -requires specific configurations. - -We provide examples of configuration for the following K8S providers: - -- [Docker for Mac][] -- [KIND][] -- [Minikube][] -- [MicroK8S][] -- [OpenShift][] - -### How to deploy dedicated nodes types? - -All the Elasticsearch pods deployed share the same configuration. If you need to -deploy dedicated [nodes types][] (for example dedicated master and data nodes), -you can deploy multiple releases of this chart with different configurations -while they share the same `clusterName` value. - -For each Helm release, the nodes types can then be defined using `roles` value. - -An example of Elasticsearch cluster using 2 different Helm releases for master -and data nodes can be found in [examples/multi][]. - -#### Clustering and Node Discovery - -This chart facilitates Elasticsearch node discovery and services by creating two -`Service` definitions in Kubernetes, one with the name `$clusterName-$nodeGroup` -and another named `$clusterName-$nodeGroup-headless`. -Only `Ready` pods are a part of the `$clusterName-$nodeGroup` service, while all -pods ( `Ready` or not) are a part of `$clusterName-$nodeGroup-headless`. 
-
-If your group of master nodes has the default `nodeGroup: master` then you can
-just add new groups of nodes with a different `nodeGroup` and they will
-automatically discover the correct master. If your master nodes have a different
-`nodeGroup` name then you will need to set `masterService` to
-`$clusterName-$masterNodeGroup`.
-
-The chart value for `masterService` is used to populate
-`discovery.zen.ping.unicast.hosts` , which Elasticsearch nodes will use to
-contact master nodes and form a cluster.
-Therefore, to add a group of nodes to an existing cluster, setting
-`masterService` to the desired `Service` name of the related cluster is
-sufficient.
-
-### How to deploy clusters with security (authentication and TLS) enabled?
-
-This Helm chart can use existing [Kubernetes secrets][] to setup
-credentials or certificates for examples. These secrets should be created
-outside of this chart and accessed using [environment variables][] and volumes.
-
-An example of Elasticsearch cluster using security can be found in
-[examples/security][].
-
-### How to migrate from helm/charts stable chart?
-
-If you currently have a cluster deployed with the [helm/charts stable][] chart
-you can follow the [migration guide][].
-
-### How to install plugins?
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
-The recommended way to install plugins into our Docker images is to create a
-[custom Docker image][].
+```console
+$ helm install my-release -f values.yaml bitnami/elasticsearch
+```
-The Dockerfile would look something like:
+> **Tip**: You can use the default [values.yaml](values.yaml).
-```
-ARG elasticsearch_version
-FROM docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version}
+## Configuration and installation details
-RUN bin/elasticsearch-plugin install --batch repository-gcs
-```
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
-And then updating the `image` in values to point to your custom image.
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
-There are a couple reasons we recommend this.
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
-1. Tying the availability of Elasticsearch to the download service to install
-plugins is not a great idea or something that we recommend. Especially in
-Kubernetes where it is normal and expected for a container to be moved to
-another host at random times.
-2. Mutating the state of a running Docker image (by installing plugins) goes
-against best practices of containers and immutable infrastructure.
+### Change Elasticsearch version
-### How to use the keystore?
+To modify the Elasticsearch version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/elasticsearch/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
-#### Basic example
-Create the secret, the key name needs to be the keystore key path. In this
-example we will create a secret from a file and from a literal string.
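+For example, a minimal `values.yaml` sketch pinning an immutable tag (the tag shown is illustrative; pick a real one from the Docker Hub page linked above):
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/elasticsearch
+  # Illustrative immutable tag, not a recommendation
+  tag: 7.17.3-debian-10-r0
+```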
+### Default kernel settings
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
-```
-kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
-kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
-```
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
-To add these secrets to the keystore:
+This chart uses a **privileged** initContainer to change those settings in the kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
-```
-keystore:
-  - secretName: encryption-key
-  - secretName: slack-hook
-```
+### Enable bundled Kibana
+
+This Elasticsearch chart bundles Kibana as a subchart. You can enable it by setting the `global.kibanaEnabled=true` parameter.
+To see the notes with operational instructions from the Kibana chart, add the `--render-subchart-notes` flag to your `helm install` command; this way both the Kibana and Elasticsearch notes are shown in your terminal.
-#### Multiple keys
+When enabling the bundled Kibana subchart, there are a few gotchas to be aware of, listed below.
-All keys in the secret will be added to the keystore. To create the previous
-example in one secret you could also do:
+#### Elasticsearch REST Encryption
+When enabling Elasticsearch's REST endpoint encryption, you will also need to set `kibana.elasticsearch.security.tls.enabled` to the SAME value, along with some additional values shown below, for an "out of the box" experience:
+
+```yaml
+security:
+  enabled: true
+  # PASSWORD must be the same value passed to elasticsearch to get an "out of the box" experience
+  elasticPassword: ""
+  tls:
+    # AutoGenerate TLS certs for elastic
+    autoGenerated: true
+
+kibana:
+  elasticsearch:
+    security:
+      auth:
+        enabled: true
+        # default in the elasticsearch chart is elastic
+        kibanaUsername: ""
+        kibanaPassword: ""
+      tls:
+        # Instruct kibana to connect to elastic over https
+        enabled: true
+        # Bit of a catch 22, as you will need to know the name upfront of your release
+        existingSecret: RELEASENAME-elasticsearch-coordinating-only-crt
+        # As the certs are auto-generated, they are pemCerts so set to true
+        usePemCerts: true
+```
-kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
-```
+
+At a bare minimum, when working with Kibana and Elasticsearch together, the following values MUST be the same, otherwise things will fail:
+
+```yaml
+security:
+  tls:
+    restEncryption: true
+
+# assumes global.kibanaEnabled=true
+kibana:
+  elasticsearch:
+    security:
+      tls:
+        enabled: true
+```
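+Putting the section together, a minimal hedged sketch for enabling the bundled Kibana (the service name is illustrative and depends on your release name):
+
+```yaml
+# Enables the Kibana subchart described in this section
+global:
+  kibanaEnabled: true
+
+# Optional: point Kibana at the coordinating-only service explicitly
+kibana:
+  elasticsearch:
+    hosts:
+      - my-release-elasticsearch-coordinating-only
+    port: 9200
+```
+
+Remember to pass `--render-subchart-notes` to `helm install` so the Kibana notes are rendered as well.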
+### Adding extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: ELASTICSEARCH_VERSION
+    value: "7.0"
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsConfigMap` or the `extraEnvVarsSecret` values.
+
+### Using custom init scripts
+
+For advanced operations, the Bitnami Elasticsearch chart allows using custom init scripts that will be mounted inside `/docker-entrypoint.init-db`. You can include the file directly in your `values.yaml` with `initScripts`, or use a ConfigMap or a Secret (in case of sensitive data) for mounting these extra scripts. In that case, use the `initScriptsCM` and `initScriptsSecret` values.
+
+```console
+initScriptsCM=special-scripts
+initScriptsSecret=special-scripts-sensitive
+```
+
+### Snapshot and restore operations
+
+As described in the [official documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository), it is necessary to register a snapshot repository before you can perform snapshot and restore operations.
+
+This chart allows you to configure Elasticsearch to use a shared file system to store snapshots. To do so, you need to mount a RWX volume on every Elasticsearch node, and set the parameter `snapshotRepoPath` with the path where the volume is mounted. In the example below, you can find the values to set when using an NFS Persistent Volume:
+
+```yaml
+extraVolumes:
+  - name: snapshot-repository
+    nfs:
+      server: nfs.example.com # Please change this to your NFS server
+      path: /share1
+extraVolumeMounts:
+  - name: snapshot-repository
+    mountPath: /snapshots
+snapshotRepoPath: "/snapshots"
+```
+
+### Sidecars and Init Containers
+
+If you have a need for additional containers to run within the same pod as the Elasticsearch components (e.g. an additional metrics or logging exporter), you can do so via the `XXX.sidecars` parameter(s), where XXX is a placeholder you need to replace with the actual component(s). Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s).
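+For instance, a hedged sketch of a custom pod anti-affinity for the data nodes (the label selector is illustrative; adapt it to the labels of your release):
+
+```yaml
+data:
+  affinity:
+    podAntiAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            # Prefer spreading data pods across Kubernetes nodes
+            topologyKey: kubernetes.io/hostname
+            labelSelector:
+              matchLabels:
+                app.kubernetes.io/component: data
+```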
Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can make use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Persistence
+
+The [Bitnami Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container.
+
+By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 17.0.0
+
+This version bumps the major version of the Kibana Helm Chart bundled as a dependency; [here](https://github.com/bitnami/charts/tree/master/bitnami/kibana#to-900) you can see the changes implemented in that Kibana major version.
+
+### To 16.0.0
+
+This version replaces the Ingest and Coordinating Deployments with Statefulsets. This change is required so Coordinating and Ingest nodes have their own services associated with them, which is required for TLS hostname verification.
+
+We haven't encountered any issues during our upgrade test, but we recommend creating volume backups before upgrading this major version, especially for users with additional volumes and custom configurations.
+
+Additionally, this version adds support for X-Pack Security features such as TLS/SSL encryption and basic authentication.
+
+### To 15.0.0
+
+From this version onwards, Elasticsearch container components are now licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license), which is not currently accepted as an Open Source license by the Open Source Initiative (OSI).
+
+Also, from now on, the Helm Chart will include the X-Pack plugin installed by default.
+
+Regular upgrade is compatible from previous versions.
+
+### To 14.0.0
+
+This version standardizes the way of defining Ingress rules in the Kibana subchart. When configuring a single hostname for the Ingress rule, set the `kibana.ingress.hostname` value. When defining more than one, set the `kibana.ingress.extraHosts` array. Apart from this case, no issues are expected to appear when upgrading.
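+For reference, a hedged sketch of the standardized structure (hostnames are illustrative):
+
+```yaml
+kibana:
+  ingress:
+    enabled: true
+    # Single hostname for the Ingress rule
+    hostname: kibana.example.com
+    # Any additional hostnames
+    extraHosts:
+      - name: kibana.internal.example.com
+        path: /
+```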
+
+### To 13.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Dependency information was moved from the *requirements.yaml* to the *Chart.yaml*
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 12.0.0
+
+Several changes were introduced that break backwards compatibility:
+
+- Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
+- Labels are adapted to follow the Helm charts best practices.
+- Elasticsearch data pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+### To 11.0.0
+
+Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to destroy it before you upgrade the chart to this version.
+
+```console
+$ kubectl delete statefulset elasticsearch-master
+$ helm upgrade <release-name> bitnami/elasticsearch
+```
-You can also take a look at the [config example][] which is used as part of the
-automated testing pipeline.
+### To 10.0.0
-### How to enable snapshotting?
+In this version, Kibana was added as a dependent chart. More info about how to enable and work with this bundled Kibana in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
-1. Install your [snapshot plugin][] into a custom Docker image following the
-[how to install plugins guide][].
-2. Add any required secrets or credentials into an Elasticsearch keystore
-following the [how to use the keystore][] guide.
-3. Configure the [snapshot repository][] as you normally would.
-4. To automate snapshots you can use [Snapshot Lifecycle Management][] or a tool
-like [curator][].
+### To 9.0.0
-### How to configure templates post-deployment?
+Elasticsearch master nodes store the cluster status at `/bitnami/elasticsearch/data`.
Among other things, this includes the UUID of the Elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if the k8s node(s) hosting the ES master nodes go down and the pods are rescheduled on other nodes. In the event that this happens, the data nodes will no longer be able to join a cluster as the UUID changed, resulting in a broken cluster.
-You can use `postStart` [lifecycle hooks][] to run code triggered after a
-container is created.
+To resolve such issues, PVCs are now attached for master node data persistence.
-Here is an example of `postStart` hook to configure templates:
+---
-```yaml
-lifecycle:
-  postStart:
-    exec:
-      command:
-        - bash
-        - -c
-        - |
-          #!/bin/bash
-          # Add a template to adjust number of shards/replicas
-          TEMPLATE_NAME=my_template
-          INDEX_PATTERN="logstash-*"
-          SHARD_COUNT=8
-          REPLICA_COUNT=1
-          ES_URL=http://localhost:9200
-          while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
-          curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
-```
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in a compatibility breakage.
+
+### To 7.4.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 7.0.0
+
+This version enables by default the initContainer that modifies some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 3.0.0.
The following example assumes that the release name is elasticsearch: + +```console +$ kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +$ kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +$ kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +$ kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +$ kubectl delete statefulset elasticsearch-data --cascade=false ``` +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 -## Contributing - -Please check [CONTRIBUTING.md][] before any contribution or for any questions -about our development and testing process. - -[7.15]: https://github.com/elastic/helm-charts/releases -[#63]: https://github.com/elastic/helm-charts/issues/63 -[BREAKING_CHANGES.md]: https://github.com/elastic/helm-charts/blob/master/BREAKING_CHANGES.md -[CHANGELOG.md]: https://github.com/elastic/helm-charts/blob/master/CHANGELOG.md -[CONTRIBUTING.md]: https://github.com/elastic/helm-charts/blob/master/CONTRIBUTING.md -[alternate scheduler]: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods -[annotations]: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -[anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -[cluster.name]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/important-settings.html#cluster-name -[clustering and node discovery]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/README.md#clustering-and-node-discovery -[config example]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/config/values.yaml -[curator]: https://www.elastic.co/guide/en/elasticsearch/client/curator/7.9/snapshot.html -[custom docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/docker.html#_c_customized_image -[deploys statefulsets serially]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies -[discovery.zen.minimum_master_nodes]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/discovery-settings.html#minimum_master_nodes -[docker for mac]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/docker-for-mac -[elasticsearch cluster health status params]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/cluster-health.html#request-params -[elasticsearch docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/docker.html -[environment variables]: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config -[environment from variables]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables -[examples]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/ -[examples/multi]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/multi 
-[examples/security]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/security -[gke]: https://cloud.google.com/kubernetes-engine -[helm]: https://helm.sh -[helm/charts stable]: https://github.com/helm/charts/tree/master/stable/elasticsearch/ -[how to install plugins guide]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/README.md#how-to-install-plugins -[how to use the keystore]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/README.md#how-to-use-the-keystore -[http.port]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/modules-http.html#_settings -[imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images -[imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret -[ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[java options]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/jvm-options.html -[jvm heap size]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/heap-size.html -[hostAliases]: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ -[kind]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/kubernetes-kind -[kubernetes secrets]: https://kubernetes.io/docs/concepts/configuration/secret/ -[labels]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -[lifecycle hooks]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ -[loadBalancer annotations]: https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws -[loadBalancer externalTrafficPolicy]: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip -[loadBalancer]: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer -[maxUnavailable]: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget -[migration guide]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/migration/README.md -[minikube]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/minikube -[microk8s]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/microk8s -[multi]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/multi/ -[network.host elasticsearch setting]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/network.host.html -[node affinity settings]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -[node-certificates]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/configuring-tls.html#node-certificates -[nodePort]: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport -[nodes types]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/modules-node.html -[nodeSelector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector -[openshift]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/openshift -[priorityClass]: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -[probe]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ -[resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ -[roles]: 
https://www.elastic.co/guide/en/elasticsearch/reference/7.15/modules-node.html -[secret]: https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets -[securityContext]: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -[service types]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types -[snapshot lifecycle management]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/snapshot-lifecycle-management.html -[snapshot plugin]: https://www.elastic.co/guide/en/elasticsearch/plugins/7.15/repository.html -[snapshot repository]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/modules-snapshots.html -[supported configurations]: https://github.com/elastic/helm-charts/tree/7.15/README.md#supported-configurations -[sysctl vm.max_map_count]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/vm-max-map-count.html#vm-max-map-count -[terminationGracePeriod]: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods -[tolerations]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -[transport port configuration]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/modules-transport.html#_transport_settings -[updateStrategy]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ -[values.yaml]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/values.yaml -[volumeClaimTemplate for statefulsets]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/.helmignore b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/Chart.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/Chart.yaml new file mode 100644 index 0000000..2c93878 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.13.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.13.0
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/README.md b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/README.md
new file mode 100644
index 0000000..c090f74
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/README.md
@@ -0,0 +1,347 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, scoped in different sections.
+
+### Affinities
+
+| Helper identifier              | Description                                          | Expected Input                                 |
+|--------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`  | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`  | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+
+### Capabilities
+
+| Helper identifier                              | Description                                                                                           | Expected Input    |
+|------------------------------------------------|-------------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`              | Return the target Kubernetes version (using the client default if `.Values.kubeVersion` is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`       | Return the appropriate apiVersion for cronjob.                                                        | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`    | Return the appropriate apiVersion for deployment.                                                     | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`   | Return the appropriate apiVersion for statefulset.                                                    | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`       | Return the appropriate apiVersion for ingress.                                                        | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`          | Return the appropriate apiVersion for RBAC resources.                                                 | `.` Chart context |
+| `common.capabilities.crd.apiVersion`           | Return the appropriate apiVersion for CRDs.                                                           | `.` Chart context |
+| `common.capabilities.policy.apiVersion`        | Return the appropriate apiVersion for podsecuritypolicy.                                              | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy.                                                  | `.` Chart context |
+| `common.capabilities.apiService.apiVersion`    | Return the appropriate apiVersion for APIService.                                                     | `.` Chart context |
+| `common.capabilities.supportsHelmVersion`      | Returns true if the used Helm version is 3.3+.                                                        | `.` Chart context |
+
+### Errors
+
+| Helper identifier                       | Description                                                                                                                                        | Expected Input                                                                     |
+|-----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty` | Ensures required passwords are given when upgrading a chart. If `validationErrors` is not empty, it throws an error and stops the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier                 | Description                                                                                                       | Expected Input                                                                                          |
+|-----------------------------------|----------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|
+| `common.images.image`             | Return the proper and full image name                                                                             | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets`       | Return the proper Docker Image Registry Secret Names (deprecated: use `common.images.renderPullSecrets` instead)  | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global`   |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates)                              | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $`               |
+
+### Ingress
+
+| Helper identifier                         | Description                                                                                                            | Expected Input                                                                                                                                                                    |
+|-------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend`                  | Generate a proper Ingress backend entry depending on the API version                                                  | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType`         | Prints "true" if the pathType field is supported                                                                       | `.` Chart context                                                                                                                                                                |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported                                                               | `.` Chart context                                                                                                                                                                |
+| `common.ingress.certManagerRequest`       | Prints "true" if the required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations`                                                                                                                     |
+
+### Labels
+
+| Helper identifier           | Description                                                                 | Expected Input    |
+|-----------------------------|------------------------------------------------------------------------------|-------------------|
+| `common.labels.standard`    | Return Kubernetes standard labels                                           | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier        | Description                                                | Expected Input    |
+|--------------------------|---------------------------------------------------------------|-------------------|
+| `common.names.name`      | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname`  | Create a default fully qualified app name.                 | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden               | `.` Chart context |
+| `common.names.chart`     | Chart name plus version                                    | `.` Chart context |
+
+### Secrets
+
+| Helper identifier                 | Description                                                   | Expected Input                                                                                                                                                                                                                       |
+|-----------------------------------|------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name`             | Generate the name of the secret.                              | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure.                                                                      |
+| `common.secrets.key`              | Generate secret key.                                          | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure.                                                                                                 |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists`           | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $`                                                                                                                                                                                            |
+
+### Storage
+
+| Helper identifier      | Description                      | Expected Input                                                                                                       |
+|------------------------|------------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier         | Description                              | Expected Input                                                                                                                                                 |
+|---------------------------|--------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value to be rendered as a template, and context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier              | Description                                                                                             | Expected Input                                                          |
+|--------------------------------|------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar`   | Build an environment variable name given a field.                                                       | `dict "field" "my-password"`                                            |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.                                                               | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path                                               | `dict "key" "path.to.key" "context" $`                                  |
+| `common.utils.getKeyFromList`  | Returns the first `.Values` key with a defined value, or the first key of the list if none are defined  | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $`          |
+
+### Validations
+
+| Helper identifier                                | Description                                                                                                                | Expected Input                                                                                                                                                                                                                                                                  |
+|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty`         | Validate that a value is not empty.                                                                                        | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. In case they are given, the helper will generate a "how to get it" instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty`       | Validate that multiple values are not empty. It returns a shared error for all the values.                                 | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue)                                                                                                                                                            |
+| `common.validations.values.mariadb.passwords`    | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values.    | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mariadb chart is used as a subchart.                                                                                         |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the postgresql chart is used as a subchart.                                                                                   |
+| `common.validations.values.redis.passwords`      | This helper will ensure the required passwords for Redis™ are not empty. It returns a shared error for all the values.     | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the redis chart is used as a subchart.                                                                                             |
+| `common.validations.values.cassandra.passwords`  | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values.  | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the cassandra chart is used as a subchart.                                                                                     |
+| `common.validations.values.mongodb.passwords`    | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values.   | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mongodb chart is used as a subchart.                                                                                         |
+
+### Warnings
+
+| Helper identifier            | Description                        | Expected Input                                              |
+|------------------------------|--------------------------------------|------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName is set to "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
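+
+# With values like the block below (referenced via .Values.existingSecret),
+# "common.secrets.name" resolves to "mySecret", and "common.secrets.key" maps
+# the expected key "password" to "myPasswordKey" through keyMapping.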
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
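+
+As a minimal sketch of how a consuming chart wires this library in (the chart
+name `mychart` and the ConfigMap contents below are hypothetical; the
+`dependencies` entry mirrors the TL;DR above):
+
+```yaml
+# Chart.yaml (apiVersion v2, Helm 3 only -- see "To 1.0.0" above)
+apiVersion: v2
+name: mychart
+version: 0.1.0
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```yaml
+# templates/configmap.yaml -- names and standard labels come from the library
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+data:
+  setting: "value"
+```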
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_affinities.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include 
"common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_capabilities.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..4ec8321 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_errors.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_images.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_ingress.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_labels.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_names.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_names.tpl new file mode 100644 index 0000000..c8574d1 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_secrets.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. 
Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
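+
+Note (assumed Helm behavior, not from this chart): this helper relies on the
+"lookup" function, which queries the API server at install/upgrade time; during
+offline rendering (e.g. "helm template" or "--dry-run"), "lookup" returns an
+empty result, so the secret will be reported as missing.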
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_storage.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_tplvalues.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_utils.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_warnings.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_cassandra.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mariadb.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mongodb.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_postgresql.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_redis.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..5d72959 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_validations.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/common/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/.helmignore b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.lock b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.lock new file mode 100644 index 0000000..54d6890 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.13.0 +digest: sha256:e83af41b39942278f8389623671732e624f28c6f1ad6ac2d937e210c5f354a18 +generated: "2022-03-27T05:42:16.304282468Z" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.yaml new file mode 100644 index 0000000..e5dd433 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Analytics +apiVersion: v2 +appVersion: 7.17.2 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Kibana is an open source, browser based analytics and search dashboard + for Elasticsearch. Kibana strives to be easy to get started with, while also being + flexible and powerful. +home: https://github.com/bitnami/charts/tree/master/bitnami/kibana +icon: https://bitnami.com/assets/stacks/kibana/img/kibana-stack-220x234.png +keywords: +- kibana +- analytics +- monitoring +- metrics +- logs +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: kibana +sources: +- https://github.com/bitnami/bitnami-docker-kibana +- https://www.elastic.co/products/kibana +version: 9.3.15 diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/README.md b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/README.md new file mode 100644 index 0000000..a9b19aa --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/README.md @@ -0,0 +1,419 @@ + + +# Kibana packaged by Bitnami + +Kibana is an open source, browser based analytics and search dashboard for Elasticsearch. 
Kibana strives to be easy to get started with, while also being flexible and powerful.
+
+[Overview of Kibana](https://www.elastic.co/products/kibana)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
+
+## TL;DR
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/kibana --set elasticsearch.hosts[0]=<elasticsearch-host> --set elasticsearch.port=<elasticsearch-port>
+```
+
+## Introduction
+
+This chart bootstraps a [Kibana](https://github.com/bitnami/bitnami-docker-kibana) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+- PV provisioner support in the underlying infrastructure
+- ReadWriteMany volumes for deployment scaling
+
+## Installing the Chart
+
+This chart requires an Elasticsearch instance to work. You can use an already existing Elasticsearch instance.
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release \
+  --set elasticsearch.hosts[0]=<elasticsearch-host> \
+  --set elasticsearch.port=<elasticsearch-port> \
+  bitnami/kibana
+```
+
+These commands deploy Kibana on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release. With Helm 3 (required by this chart), the release history is removed as well by default; pass `--keep-history` to retain it.
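+
+For example, to keep the release history for later inspection (a minimal illustration; `--keep-history` is a standard Helm 3 flag):
+
+```console
+$ helm uninstall my-release --keep-history
+```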
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------ | --------------------------------------------------------------------------------------------------------- | ----- | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `nameOverride` | String to partially override common.names.fullname template with a string (will prepend the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template with a string | `""` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | + + +### Kibana parameters + +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `image.registry` | Kibana image registry | `docker.io` | +| `image.repository` | Kibana image repository | `bitnami/kibana` | +| `image.tag` | Kibana image tag (immutable tags are recommended) | `7.16.2-debian-10-r20` | +| `image.pullPolicy` | Kibana image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `replicaCount` | Number of replicas of the Kibana Pod | `1` | +| `updateStrategy.type` | Set up update strategy for Kibana installation. | `RollingUpdate` | +| `schedulerName` | Alternative scheduler | `""` | +| `hostAliases` | Add deployment host aliases | `[]` | +| `plugins` | Array containing the Kibana plugins to be installed in deployment | `[]` | +| `savedObjects.urls` | Array containing links to NDJSON files to be imported during Kibana initialization | `[]` | +| `savedObjects.configmap` | Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template) | `""` | +| `extraConfiguration` | Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template | `{}` | +| `configurationCM` | ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml | `""` | +| `extraEnvVars` | Array containing extra env vars to configure Kibana | `[]` | +| `extraEnvVarsCM` | ConfigMap containing extra env vars to configure Kibana | `""` | +| `extraEnvVarsSecret` | Secret containing extra env vars to configure Kibana (in case of sensitive data) | `""` | +| `extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` | +| `extraVolumeMounts` | Array to add extra mounts. 
Normally used with `extraVolumes` | `[]` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r305` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources` | Volume Permissions resources | `{}` | +| `persistence.enabled` | Enable persistence | `true` | +| `persistence.storageClass` | Kibana data Persistent Volume Storage Class | `""` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `""` | +| `persistence.accessMode` | Access mode to the PV | `ReadWriteOnce` | +| `persistence.size` | Size for the PV | `10Gi` | +| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `120` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `forceInitScripts` | Force execution of init scripts | `false` | +| `initScriptsCM` | Configmap with init scripts to execute | `""` | +| `initScriptsSecret` | Secret with init scripts to execute (for sensitive data) | `""` | +| `service.port` | Kubernetes Service port | `5601` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.nodePort` | Specify the nodePort value for the LoadBalancer and NodePort service types | `""` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.annotations` | Annotations for Kibana service (evaluated as a template) | `{}` | +| `service.labels` | Extra labels for Kibana service | `{}` | +| `service.loadBalancerIP` | loadBalancerIP if Kibana service type is `LoadBalancer` | `""` | +| `service.extraPorts` | Extra ports to expose in the service (normally used with the `sidecar` value) | `[]` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.pathType` | Ingress Path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Override API Version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress resource. If specified as "*" no host rule is configured | `kibana.local` | +| `ingress.path` | The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at ingress.hostname parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | Additional arbitrary path/backend objects | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kibana | `true` | +| `serviceAccount.name` | Name of serviceAccount | `""` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `containerPort` | Port to expose at container level | `5601` | +| `securityContext.enabled` | Enable securityContext on for Kibana deployment | `true` | +| `securityContext.fsGroup` | Group to configure permissions for volumes | `1001` | +| `securityContext.runAsUser` | User for the security context | `1001` | +| `securityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `resources.limits` | The resources limits for the container | `{}` | +| `resources.requests` | The requested resources for the container | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Extra labels to add to Pod | `{}` | +| `sidecars` | Attach additional containers to the pod | `[]` | +| `initContainers` | Add additional init containers to the pod | `[]` | +| `configuration` | Kibana configuration | `{}` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.service.annotations` | Prometheus annotations for the Kibana service | `{}` | +| `metrics.serviceMonitor.enabled` | If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + + +### Kibana server TLS configuration + +| Name | Description | Value | +| ---------------------- | ------------------------------------------------------------------------------ | ------- | +| `tls.enabled` | Enable SSL/TLS encryption for Kibana server (HTTPS) | `false` | +| `tls.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. | `false` | +| `tls.existingSecret` | Name of the existing secret containing Kibana server certificates | `""` | +| `tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of PKCS12 | `false` | +| `tls.keyPassword` | Password to access the PEM key when it is password-protected. | `""` | +| `tls.keystorePassword` | Password to access the PKCS12 keystore when it is password-protected. | `""` | +| `tls.passwordsSecret` | Name of a existing secret containing the Keystore or PEM key password | `""` | + + +### Elasticsearch parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------- | +| `elasticsearch.hosts` | List of elasticsearch hosts to connect to. | `[]` | +| `elasticsearch.port` | Elasticsearch port | `""` | +| `elasticsearch.security.auth.enabled` | Set to 'true' if Elasticsearch has authentication enabled | `false` | +| `elasticsearch.security.auth.kibanaUsername` | Kibana server user to authenticate with Elasticsearch | `elastic` | +| `elasticsearch.security.auth.kibanaPassword` | Kibana server password to authenticate with Elasticsearch | `""` | +| `elasticsearch.security.auth.existingSecret` | Name of the existing secret containing the Password for the Kibana user | `""` | +| `elasticsearch.security.tls.enabled` | Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS) | `false` | +| `elasticsearch.security.tls.verificationMode` | Verification mode for SSL communications. | `full` | +| `elasticsearch.security.tls.existingSecret` | Name of the existing secret containing Elasticsearch Truststore or CA certificate. 
Required unless verificationMode=none | `""` |
+| `elasticsearch.security.tls.usePemCerts`        | Set to 'true' to use PEM certificates instead of PKCS12.                   | `false` |
+| `elasticsearch.security.tls.truststorePassword` | Password to access the PKCS12 truststore in case it is password-protected. | `""`    |
+| `elasticsearch.security.tls.passwordsSecret`    | Name of an existing secret containing the Truststore password              | `""`    |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set admin.user=admin-user bitnami/kibana
+```
+
+The above command sets the Kibana admin user to `admin-user`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/kibana
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Change Kibana version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/change-image-version/).
+
+### Use custom configuration
+
+The Bitnami Kibana chart supports using custom configuration settings. For example, to mount a custom `kibana.yml` you can create a ConfigMap like the following:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: myconfig
+data:
+  kibana.yml: |-
+    # Raw text of the file
+```
+
+Then pass the ConfigMap name to the corresponding parameter: `configurationCM=myconfig`
+
+An alternative is to provide extra configuration settings to the default kibana.yml that the chart deploys. This is done using the `extraConfiguration` value:
+
+```yaml
+extraConfiguration:
+  "server.maxPayloadBytes": 1048576
+  "server.pingTimeout": 1500
+```
+
+### Add extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: ELASTICSEARCH_VERSION
+    value: "6"
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
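+
+As an illustration, a ConfigMap such as the one below could be referenced via `extraEnvVarsCM=kibana-extra-env` (the ConfigMap name and the variable shown are hypothetical examples; each key/value pair is injected as an environment variable):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kibana-extra-env
+data:
+  # Injected into the Kibana container as environment variables
+  ELASTICSEARCH_REQUEST_TIMEOUT: "30000"
+```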
+ +### Use custom initialization scripts + +For advanced operations, the Bitnami Kibana chart allows using custom initialization scripts that will be mounted in `/docker-entrypoint.init-db`. Mount these extra scripts using a ConfigMap or a Secret (in case of sensitive data) and specify them via the `initScriptsCM` and `initScriptsSecret` chart parameters. Refer to the [chart documentation on custom initialization scripts](https://docs.bitnami.com/kubernetes/apps/kibana/administration/use-custom-init-scripts/) for an example. + +### Install plugins + +The Bitnami Kibana chart allows you to install a set of plugins at deployment time using the `plugins` chart parameter. Refer to the [chart documentation on installing plugins](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/install-plugins/) for an example. + +```console +elasticsearch.hosts[0]=elasticsearch-host +elasticsearch.port=9200 +plugins[0]=https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip +``` + +> **NOTE** Make sure that the plugin is available for the Kibana version you are deploying + +### Import saved objects + +If you have visualizations and dashboards (in NDJSON format) to import to Kibana, create a ConfigMap that includes them and then install the chart with the `savedObjects.configmap` or `savedObjects.urls` parameters. Refer to the [chart documentation on importing saved objects](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/import-saved-objects/) for an example. + +### Use Sidecars and Init Containers + +If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter. + +Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/configure-sidecar-init-containers/). + +#### Add a sample Elasticsearch container as sidecar + +This chart requires an Elasticsearch instance to work. For production, the options are to use an already existing Elasticsearch instance or deploy the [Elasticsearch chart](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch) with the [`global.kibanaEnabled=true` parameter](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#enable-bundled-kibana). + +For testing purposes, use a sidecar Elasticsearch container setting the following parameters during the Kibana chart installation: + +``` +elasticsearch.hosts[0]=localhost +elasticsearch.port=9200 +sidecars[0].name=elasticsearch +sidecars[0].image=bitnami/elasticsearch:latest +sidecars[0].imagePullPolicy=IfNotPresent +sidecars[0].ports[0].name=http +sidecars[0].ports[0].containerPort=9200 +``` + +### Set Pod affinity + +This chart allows you to set custom Pod affinity using the `affinity` parameter. Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. 
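+
+For example, a values snippet such as the following (a sketch using only the preset parameters from the tables above) spreads the replicas across nodes whenever possible:
+
+```yaml
+replicaCount: 2
+podAntiAffinityPreset: hard    # never co-schedule two Kibana pods on the same node
+nodeAffinityPreset:
+  type: soft                   # prefer, but do not require, matching nodes
+  key: kubernetes.io/arch      # example node label
+  values:
+    - amd64
+```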
+
+## Persistence
+
+The [Bitnami Kibana](https://github.com/bitnami/bitnami-docker-kibana) image can persist data. If enabled, the persisted path is `/bitnami/kibana` by default.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Add extra volumes
+
+The Bitnami Kibana chart supports mounting extra volumes (either PVCs, secrets or configmaps) by using the `extraVolumes` and `extraVolumeMounts` properties. This can be combined with advanced operations like adding extra init containers and sidecars.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 9.0.0
+
+This version updates the settings Kibana uses to communicate with Elasticsearch, adapting them to the Elasticsearch X-Pack Security features.
+
+The previous setting `elasticsearch.tls` has been replaced with `elasticsearch.security.tls.enabled`. Other settings regarding certificate verification can be found under `elasticsearch.security.tls.*`, such as the verification mode and a custom truststore.
+
+Additionally, support for the Kibana server using TLS/SSL encryption (HTTPS on port 5601) has been added.
+
+### To 8.0.0
+
+The Kibana container configuration logic was migrated to bash.
+
+From this version onwards, Kibana container components are licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license), which is not currently accepted as an Open Source license by the Open Source Initiative (OSI).
+
+Also, from now on, the Helm chart includes the X-Pack plugin installed by default.
+
+A regular upgrade is compatible from previous versions.
+
+### To 6.2.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/apps/kibana/administration/upgrade-helm3/).
+
+### To 5.0.0
+
+This version does not include Elasticsearch as a bundled dependency. 
From now on, you should specify an external Elasticsearch instance using the `elasticsearch.hosts[]` and `elasticsearch.port` [parameters](#parameters).
+
+### To 3.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
+
+This major version signifies this change.
+
+### To 2.0.0
+
+This version enables by default an initContainer that modifies some kernel settings to meet the Elasticsearch requirements.
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
+
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
+
+You can disable the initContainer using the `elasticsearch.sysctlImage.enabled=false` parameter.
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/.helmignore b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/Chart.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/Chart.yaml
new file mode 100644
index 0000000..2c93878
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.13.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.13.0
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/README.md b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/README.md
new file mode 100644
index 0000000..c090f74
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/README.md
@@ -0,0 +1,347 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, grouped by section.
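+
+As a quick illustration, a template in a consuming chart can combine several of the helpers below (a sketch in the same style as the ConfigMap example above):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+data:
+  myvalue: "Hello World"
+```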
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
|
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
+
+### Ingress
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------------------|------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context |
+| `common.ingress.certManagerRequest` | Prints "true" if the required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------|-----------------------------------------------------------------------------|-------------------|
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+|--------------------------|------------------------------------------------------------|-------------------|
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------|----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate the secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate a secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
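+
+For instance, a sketch of a Secret template backed by `common.secrets.passwords.manage`, so that a generated password is reused rather than rotated on `helm upgrade` (the `auth.password` value path is a hypothetical example):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  admin-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "admin-password" "providedValues" (list "auth.password") "length" 16 "context" $) }}
+```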
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|-------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`, where value is the value to render as a template and context is usually the chart context (`$` or `.`) |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------|--------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar`   | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Get a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList`  | Return the first `.Values` key with a defined value, or the first key in the list if none is defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
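+
+As an illustration, a NOTES.txt sketch that prints retrieval instructions for a generated password (the secret and field names are hypothetical):
+
+```console
+{{- include "common.utils.secret.getvalue" (dict "secret" (include "common.names.fullname" .) "field" "admin-password" "context" $) }}
+```
+
+This renders an `export ADMIN_PASSWORD=$(kubectl get secret ... | base64 --decode)` line that users can paste into a shell.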
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. If they are given, the helper will generate instructions on how to retrieve the value. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper ensures that the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and should be set to true when the mariadb chart is used as a subchart. |
+| `common.validations.values.postgresql.passwords` | This helper ensures that the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and should be set to true when the postgresql chart is used as a subchart. |
+| `common.validations.values.redis.passwords` | This helper ensures that the required passwords for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and should be set to true when the redis chart is used as a subchart. |
+| `common.validations.values.cassandra.passwords` | This helper ensures that the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and should be set to true when the cassandra chart is used as a subchart. |
+| `common.validations.values.mongodb.passwords` | This helper ensures that the required passwords for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and should be set to true when the mongodb chart is used as a subchart. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+|------------------------------|------------------------------------|-------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: Image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true to show extra information in the logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
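+
+As a sketch, the ImageRoot structure above is typically consumed like this (the `.Values.image` path is an assumption):
+
+```yaml
+# values.yaml
+image:
+  registry: docker.io
+  repository: bitnami/nginx
+  tag: 1.16.1-debian-10-r63
+
+# templates/deployment.yaml
+containers:
+  - name: main
+    image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+```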
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
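+
+For upgrades, these validation strings are commonly fed into `common.errors.upgrade.passwords.empty` so the release fails fast when current passwords are missing. A sketch based on the usage notes in `_errors.tpl` (the value paths and secret name are placeholders):
+
+```console
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00" "context" $) -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01" "context" $) -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+```
+
+On `helm upgrade`, a non-empty validation error aborts the release with instructions on how to recover the current passwords; on a fresh install it prints nothing.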
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_affinities.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_affinities.tpl
new file mode 100644
index 0000000..189ea40
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_affinities.tpl
@@ -0,0 +1,102 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+  - preference:
+      matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+    weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+  nodeSelectorTerms:
+    - matchExpressions:
+        - key: {{ .key }}
+          operator: In
+          values:
+            {{- range .values }}
+            - {{ . | quote }}
+            {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+  {{- if eq .type "soft" }}
+    {{- include "common.affinities.nodes.soft" . -}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.nodes.hard" .
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_capabilities.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..4ec8321 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_capabilities.tpl @@ -0,0 +1,139 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_errors.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading with empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty, no error is thrown.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_images.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_ingress.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_labels.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_names.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_names.tpl new file mode 100644 index 0000000..c8574d1 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
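+
+Illustrative usage (a sketch):
+{{ include "common.names.namespace" . }}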
+*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_secrets.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. 
+ - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
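+
+Illustrative sketch, e.g. to avoid regenerating a Secret that already exists:
+{{- if not (include "common.secrets.exists" (dict "secret" "secret-name" "context" $)) }}
+  (render the Secret manifest here)
+{{- end }}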
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_storage.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_tplvalues.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_utils.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..ea083a2 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_warnings.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_cassandra.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mariadb.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mongodb.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..a071ea4 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth.
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture.
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_postgresql.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 0000000..164ec0d
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+      {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not.
Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_redis.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 0000000..5d72959
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis™ required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not.
Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardized values structure (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_validations.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/ci/values-with-es.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/ci/values-with-es.yaml
new file mode 100644
index 0000000..b306e4b
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/ci/values-with-es.yaml
@@ -0,0 +1,5 @@
+elasticsearch:
+  hosts:
+    - elasticsearch-1
+    - elasticsearch-2
+  port: 9300
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/NOTES.txt b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/NOTES.txt
new file mode 100644
index 0000000..1415cdb
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/NOTES.txt
@@ -0,0 +1,56 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if or (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
+######################################################################################################
+### ERROR: You did not provide the Elasticsearch external host or port in your 'helm install' call ###
+######################################################################################################
+
+Complete your Kibana deployment by running:
+
+  helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/kibana \
+    --set elasticsearch.hosts[0]=YOUR_ES_HOST,elasticsearch.port=YOUR_ES_PORT
+
+Replace the "YOUR_ES_HOST" and "YOUR_ES_PORT" placeholders with the proper values of your Elasticsearch deployment.
+
+{{- else -}}
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+  Get the Kibana URL and associate the Kibana hostname with your cluster's external IP:
+
+   export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
+   echo "Kibana URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
+   echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
+
+{{- else if contains "NodePort" .Values.service.type }}
+   export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }})
+   export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+   echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "common.names.fullname" .
}}'
+   export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+   echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+   echo "Visit http://127.0.0.1:8080 to use your application"
+   kubectl port-forward svc/{{ include "common.names.fullname" . }} 8080:{{ .Values.service.port }}
+{{- end }}
+
+{{- if or .Values.ingress.enabled (contains "NodePort" .Values.service.type) (contains "LoadBalancer" .Values.service.type) }}
+
+WARNING: Kibana is externally accessible from the cluster, but the dashboard does not provide any authentication mechanism. Make sure you follow the authentication guidelines for your Elastic stack.
++info https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+WARNING: For Prometheus metrics to work, make sure that the kibana-prometheus-exporter plugin is installed:
++info https://github.com/pjhampton/kibana-prometheus-exporter
+{{- end }}
+
+{{- include "kibana.validateValues" . }}
+{{- include "kibana.checkRollingTags" . }}
+{{- end }}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/_helpers.tpl b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/_helpers.tpl
new file mode 100644
index 0000000..7955351
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/_helpers.tpl
@@ -0,0 +1,266 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the proper Kibana image name
+*/}}
+{{- define "kibana.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "kibana.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "kibana.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Return true if the deployment should include dashboards
+*/}}
+{{- define "kibana.importSavedObjects" -}}
+{{- if or .Values.savedObjects.configmap .Values.savedObjects.urls }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch URL.
+*/}}
+{{- define "kibana.elasticsearch.url" -}}
+{{- if .Values.elasticsearch.hosts -}}
+{{- $totalHosts := len .Values.elasticsearch.hosts -}}
+{{- $protocol := ternary "https" "http" .Values.elasticsearch.security.tls.enabled -}}
+{{- range $i, $hostTemplate := .Values.elasticsearch.hosts -}}
+{{- $host := tpl $hostTemplate $ }}
+{{- printf "%s://%s:%s" $protocol $host (include "kibana.elasticsearch.port" $) -}}
+{{- if (lt ( add1 $i ) $totalHosts ) }}{{- printf "," -}}{{- end }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch Port.
+*/}}
+{{- define "kibana.elasticsearch.port" -}}
+{{- .Values.elasticsearch.port -}}
+{{- end -}}
+
+{{/*
+Set Kibana PVC name.
+*/}}
+{{- define "kibana.pvc" -}}
+{{- .Values.persistence.existingClaim | default (include "common.names.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts Secret name.
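+(Illustrative note, not part of the upstream chart docs: the value is evaluated
+as a template, so a hypothetical values setting such as
+initScriptsSecret: "{{ .Release.Name }}-init-scripts" would resolve to a
+release-scoped Secret name.)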
+*/}}
+{{- define "kibana.initScriptsSecret" -}}
+{{- printf "%s" (tpl .Values.initScriptsSecret $) -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts configmap name.
+*/}}
+{{- define "kibana.initScriptsCM" -}}
+{{- printf "%s" (tpl .Values.initScriptsCM $) -}}
+{{- end -}}
+
+{{/*
+Get the saved objects configmap name.
+*/}}
+{{- define "kibana.savedObjectsCM" -}}
+{{- printf "%s" (tpl .Values.savedObjects.configmap $) -}}
+{{- end -}}
+
+{{/*
+Get the configuration ConfigMap name.
+*/}}
+{{- define "kibana.configurationCM" -}}
+{{- .Values.configurationCM | default (printf "%s-conf" (include "common.names.fullname" .)) -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "kibana.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "kibana.validateValues.noElastic" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.configConflict" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.extraVolumes" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.tls" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.elasticsearch.auth" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.elasticsearch.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - must provide an Elasticsearch */}}
+{{- define "kibana.validateValues.noElastic" -}}
+{{- if and (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
+kibana: no-elasticsearch
+    You did not specify an external Elasticsearch instance.
+    Please set elasticsearch.hosts and elasticsearch.port
+{{- else if and (not .Values.elasticsearch.hosts) .Values.elasticsearch.port }}
+kibana: missing-es-settings-host
+    You specified the external Elasticsearch port but not the host. Please
+    set elasticsearch.hosts
+{{- else if and .Values.elasticsearch.hosts (not .Values.elasticsearch.port) }}
+kibana: missing-es-settings-port
+    You specified the external Elasticsearch hosts but not the port. Please
+    set elasticsearch.port
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - configuration conflict */}}
+{{- define "kibana.validateValues.configConflict" -}}
+{{- if and (.Values.extraConfiguration) (.Values.configurationCM) -}}
+kibana: conflict-configuration
+    You specified a ConfigMap with kibana.yml and a set of settings to be added
+    to the default kibana.yml. Please only set either extraConfiguration or configurationCM
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - Incorrect extra volume settings */}}
+{{- define "kibana.validateValues.extraVolumes" -}}
+{{- if and (.Values.extraVolumes) (not .Values.extraVolumeMounts) -}}
+kibana: missing-extra-volume-mounts
+    You specified extra volumes but not mount points for them. Please set
+    the extraVolumeMounts value
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - No certificates for Kibana server */}}
+{{- define "kibana.validateValues.tls" -}}
+{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) (not .Values.tls.autoGenerated) -}}
+kibana: tls.enabled
+    In order to enable HTTPS for Kibana, you also need to provide an existing secret
+    containing the TLS certificates (--set tls.existingSecret="my-secret") or enable
+    auto-generated certificates (--set tls.autoGenerated="true").
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - No credentials for Elasticsearch auth */}}
+{{- define "kibana.validateValues.elasticsearch.auth" -}}
+{{- if and .Values.elasticsearch.security.auth.enabled (not .Values.elasticsearch.security.auth.kibanaPassword) (not .Values.elasticsearch.security.auth.existingSecret) -}}
+kibana: missing-kibana-credentials
+    You enabled Elasticsearch authentication but you didn't provide the required credentials for
+    Kibana to connect. Please provide them (--set elasticsearch.security.auth.kibanaPassword="XXXXX")
+    or the name of an existing secret containing them (--set elasticsearch.security.auth.existingSecret="my-secret").
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - Elasticsearch HTTPS no trusted CA */}}
+{{- define "kibana.validateValues.elasticsearch.tls" -}}
+{{- if and .Values.elasticsearch.security.tls.enabled (ne "none" .Values.elasticsearch.security.tls.verificationMode) (not .Values.elasticsearch.security.tls.existingSecret) -}}
+kibana: missing-elasticsearch-trusted-ca
+    You configured communication with the Elasticsearch REST API over HTTPS with certificate
+    verification enabled, but no existing secret containing the truststore or CA
+    certificate was provided (--set elasticsearch.security.tls.existingSecret="my-secret").
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check if there are rolling tags in the images
+*/}}
+{{- define "kibana.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- end -}}
+
+{{/*
+Return the secret containing Kibana TLS certificates
+*/}}
+{{- define "kibana.tlsSecretName" -}}
+{{- $secretName := .Values.tls.existingSecret -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-crt" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS secret object should be created
+*/}}
+{{- define "kibana.createTlsSecret" -}}
+{{- if and .Values.tls.enabled .Values.tls.autoGenerated (not .Values.tls.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+basePath URL in use by the APIs.
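+(Illustrative example, not part of the upstream chart docs: with
+configuration.server.basePath="/kibana" and configuration.server.rewriteBasePath=true,
+this helper renders "/kibana", so the saved-objects import endpoint assembled in
+saved-objects-configmap.yaml becomes localhost:5601/kibana/api/saved_objects/_import,
+assuming the default containerPort of 5601; with rewriteBasePath=false it renders "".)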
+*/}}
+{{- define "kibana.basePath" -}}
+{{- if (.Values.configuration.server.rewriteBasePath) }}
+{{- .Values.configuration.server.basePath -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a passwords Secret object should be created
+*/}}
+{{- define "kibana.createSecret" -}}
+{{- $kibanaPassword := and .Values.elasticsearch.security.auth.enabled (not .Values.elasticsearch.security.auth.existingSecret) -}}
+{{- $serverTlsPassword := and .Values.tls.enabled (or .Values.tls.keystorePassword .Values.tls.keyPassword) (not .Values.tls.passwordsSecret) -}}
+{{- $elasticsearchTlsPassword := and .Values.elasticsearch.security.tls.enabled .Values.elasticsearch.security.tls.truststorePassword (not .Values.elasticsearch.security.tls.passwordsSecret) -}}
+{{- if or $kibanaPassword $serverTlsPassword $elasticsearchTlsPassword }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Elasticsearch auth credentials
+*/}}
+{{- define "kibana.elasticsearch.auth.secretName" -}}
+{{- if .Values.elasticsearch.security.auth.existingSecret -}}
+    {{- printf "%s" .Values.elasticsearch.security.auth.existingSecret -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Elasticsearch TLS password material
+*/}}
+{{- define "kibana.elasticsearch.tls.secretName" -}}
+{{- if .Values.elasticsearch.security.tls.passwordsSecret -}}
+    {{- printf "%s" .Values.elasticsearch.security.tls.passwordsSecret -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Kibana server TLS passwords
+*/}}
+{{- define "kibana.tls.secretName" -}}
+{{- if .Values.tls.passwordsSecret -}}
+    {{- printf "%s" .Values.tls.passwordsSecret -}}
+{{- else -}}
+    {{- printf "%s" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kibana.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/configmap.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/configmap.yaml
new file mode 100644
index 0000000..ec8bde5
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/configmap.yaml
@@ -0,0 +1,20 @@
+{{- if and (not .Values.configurationCM) (and .Values.elasticsearch.hosts .Values.elasticsearch.port) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}-conf
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+data:
+  kibana.yml: |
+    pid.file: /opt/bitnami/kibana/tmp/kibana.pid
+    server.host: "::"
+    server.port: {{ .Values.containerPort }}
+    elasticsearch.hosts: [{{ include "kibana.elasticsearch.url" .
}}] + {{- if .Values.configuration.server.basePath }} + server.basePath: {{ .Values.configuration.server.basePath | quote }} + {{- end }} + server.rewriteBasePath: {{ .Values.configuration.server.rewriteBasePath }} + {{- if .Values.extraConfiguration }} + {{- tpl (toYaml .Values.extraConfiguration) $ | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/deployment.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/deployment.yaml new file mode 100644 index 0000000..2e4021b --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/deployment.yaml @@ -0,0 +1,283 @@ +{{- if and .Values.elasticsearch.hosts .Values.elasticsearch.port -}} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + strategy: {{- tpl (toYaml .Values.updateStrategy) $ | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + {{- if or .Values.podAnnotations (include "kibana.createTlsSecret" .) }} + annotations: + {{- if (include "kibana.createTlsSecret" .) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app: kibana + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kibana.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "kibana.serviceAccountName" . 
}} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: "{{ template "kibana.volumePermissions.image" . }}" + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/kibana"] + securityContext: + runAsUser: 0 + resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: kibana-data + mountPath: /bitnami/kibana + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 10 }} + {{- end }} + {{- end }} + containers: + - name: kibana + image: {{ include "kibana.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: KIBANA_PORT_NUMBER + value: {{ .Values.containerPort | quote }} + - name: KIBANA_ELASTICSEARCH_URL + value: {{ include "kibana.elasticsearch.url" . | quote }} + - name: KIBANA_ELASTICSEARCH_PORT_NUMBER + value: {{ include "kibana.elasticsearch.port" . | quote }} + - name: KIBANA_FORCE_INITSCRIPTS + value: {{ .Values.forceInitScripts | quote }} + - name: KIBANA_SERVER_ENABLE_TLS + value: {{ ternary "true" "false" .Values.tls.enabled | quote }} + {{- if or .Values.tls.usePemCerts (include "kibana.createTlsSecret" . ) }} + - name: KIBANA_SERVER_TLS_USE_PEM + value: "true" + {{- end }} + {{- if and .Values.tls.enabled .Values.tls.usePemCerts (or .Values.tls.keyPassword .Values.tls.passwordsSecret) }} + - name: KIBANA_SERVER_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kibana.tls.secretName" . }} + key: kibana-key-password + {{- end }} + {{- if and .Values.tls.enabled (not .Values.tls.usePemCerts) (or .Values.tls.keystorePassword .Values.tls.passwordsSecret) }} + - name: KIBANA_SERVER_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kibana.tls.secretName" . }} + key: kibana-keystore-password + {{- end }} + {{- if .Values.elasticsearch.security.auth.enabled }} + - name: KIBANA_USERNAME + value: {{ .Values.elasticsearch.security.auth.kibanaUsername | quote }} + - name: KIBANA_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kibana.elasticsearch.auth.secretName" . }} + key: kibana-password + {{- end }} + - name: KIBANA_ELASTICSEARCH_ENABLE_TLS + value: {{ ternary "true" "false" .Values.elasticsearch.security.tls.enabled | quote }} + - name: KIBANA_ELASTICSEARCH_TLS_USE_PEM + value: {{ ternary "true" "false" .Values.elasticsearch.security.tls.usePemCerts | quote }} + - name: KIBANA_ELASTICSEARCH_TLS_VERIFICATION_MODE + value: {{ .Values.elasticsearch.security.tls.verificationMode | quote }} + {{- if and .Values.elasticsearch.security.tls.enabled (not .Values.elasticsearch.security.tls.usePemCerts) (or .Values.elasticsearch.security.tls.truststorePassword .Values.elasticsearch.security.tls.passwordsSecret) }} + - name: KIBANA_ELASTICSEARCH_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kibana.elasticsearch.tls.secretName" . 
}} + key: elasticsearch-truststore-password + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.containerPort }} + protocol: TCP + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + {{- if .Values.configuration.server.rewriteBasePath }} + path: {{ .Values.configuration.server.basePath }}/login + {{- else }} + path: /login + {{- end }} + port: http + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + {{- if .Values.configuration.server.rewriteBasePath }} + path: {{ .Values.configuration.server.basePath }}/status + {{- else }} + path: /status + {{- end }} + port: http + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.resources "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: kibana-data + mountPath: /bitnami/kibana + - name: kibana-config + mountPath: /bitnami/kibana/conf + {{- if .Values.tls.enabled }} + - name: kibana-certificates + mountPath: /opt/bitnami/kibana/config/certs/server + readOnly: true + {{- end }} + {{- if and .Values.elasticsearch.security.tls.enabled (not (eq .Values.elasticsearch.security.tls.verificationMode "none" )) }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/kibana/config/certs/elasticsearch + readOnly: true + {{- end }} + {{- if .Values.plugins }} + - name: plugins-init-scripts + mountPath: /docker-entrypoint-initdb.d/plugin-install + {{- end }} + {{- if (include "kibana.importSavedObjects" .) 
}}
+            - name: saved-objects-init-scripts
+              mountPath: /docker-entrypoint-initdb.d/saved-objects-import
+            {{- end }}
+            {{- if .Values.savedObjects.configmap }}
+            - name: saved-objects-configmap
+              mountPath: /bitnami/kibana/saved-objects
+            {{- end }}
+            {{- if .Values.initScriptsCM }}
+            - name: custom-init-scripts-cm
+              mountPath: /docker-entrypoint-initdb.d/cm
+            {{- end }}
+            {{- if .Values.initScriptsSecret }}
+            - name: custom-init-scripts-secret
+              mountPath: /docker-entrypoint-initdb.d/secret
+            {{- end }}
+            {{- if .Values.extraVolumeMounts }}
+            {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
+            {{- end }}
+        {{- if .Values.sidecars }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
+        {{- end }}
+      volumes:
+        - name: kibana-data
+          {{- if .Values.persistence.enabled }}
+          persistentVolumeClaim:
+            claimName: {{ include "kibana.pvc" . }}
+          {{- else }}
+          emptyDir: {}
+          {{- end }}
+        {{- if .Values.tls.enabled }}
+        - name: kibana-certificates
+          secret:
+            secretName: {{ include "kibana.tlsSecretName" . }}
+            defaultMode: 256
+        {{- end }}
+        {{- if and .Values.elasticsearch.security.tls.enabled (ne .Values.elasticsearch.security.tls.verificationMode "none" ) }}
+        - name: elasticsearch-certificates
+          secret:
+            secretName: {{ required "A secret containing the Truststore or CA certificate for Elasticsearch is required" .Values.elasticsearch.security.tls.existingSecret }}
+            defaultMode: 256
+        {{- end }}
+        - name: kibana-config
+          configMap:
+            name: {{ include "kibana.configurationCM" . }}
+        {{- if (include "kibana.importSavedObjects" .) }}
+        - name: saved-objects-init-scripts
+          configMap:
+            name: {{ include "common.names.fullname" . }}-saved-objects
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.plugins }}
+        - name: plugins-init-scripts
+          configMap:
+            name: {{ include "common.names.fullname" . }}-plugins
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.initScriptsCM }}
+        - name: custom-init-scripts-cm
+          configMap:
+            name: {{ template "kibana.initScriptsCM" . }}
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.initScriptsSecret }}
+        - name: custom-init-scripts-secret
+          secret:
+            secretName: {{ template "kibana.initScriptsSecret" . }}
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.savedObjects.configmap }}
+        - name: saved-objects-configmap
+          configMap:
+            name: {{ template "kibana.savedObjectsCM" . }}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
+        {{- end }}
+{{- end }}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/extra-list.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/extra-list.yaml
new file mode 100644
index 0000000..9ac65f9
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" .
"context" $) }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/ingress.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/ingress.yaml new file mode 100644 index 0000000..426e9b3 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations .Values.ingress.certManager }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - http: + paths: + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" "http" "context" $) | nindent 14 }} + {{- if ne .Values.ingress.hostname "*" }} + host: {{ .Values.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or .Values.ingress.certManager .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraTls "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/plugins-configmap.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/plugins-configmap.yaml new file mode 100644 index 0000000..3fff719 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/plugins-configmap.yaml @@ -0,0 +1,18 @@ +{{- if .Values.plugins -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-plugins + labels: {{- include "common.labels.standard" . | nindent 4 }} +data: + install-plugins.sh: | + #!/bin/bash + echo "==> Plugin installation" + {{- $totalPlugins := len .Values.plugins }} + echo "Total plugins defined in chart installation: {{ $totalPlugins }}" + {{- range $i, $plugin := .Values.plugins }} + echo "Installing plugin {{ add $i 1 }} out of {{ $totalPlugins }}: {{ $plugin }}" + kibana-plugin install "{{ $plugin }}" + {{- end }} + echo "==> End of Plugin installation" +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/pvc.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/pvc.yaml new file mode 100644 index 0000000..2a86b70 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/pvc.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }} +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/saved-objects-configmap.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/saved-objects-configmap.yaml new file mode 100644 index 0000000..e4b4179 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/saved-objects-configmap.yaml @@ -0,0 +1,39 @@ +{{- if (include "kibana.importSavedObjects" .) 
-}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-saved-objects + labels: {{- include "common.labels.standard" . | nindent 4 }} +data: + {{- $savedObjectsUrl := printf "localhost:%d%s/api/saved_objects/_import" (int .Values.containerPort) (include "kibana.basePath" .) }} + import-saved-objects.sh: | + #!/bin/bash + echo "==> Saved objects import" + {{- if .Values.savedObjects.urls }} + {{- $totalURLs := len .Values.savedObjects.urls }} + echo "Total saved objects NDJSON URLs to import: {{ $totalURLs }}" + {{- range $i, $url := .Values.savedObjects.urls }} + echo "Importing saved objects from NDJSON in url {{ add $i 1 }} out of {{ $totalURLs }}: {{ $url }}" + download_tmp_file="$(mktemp)" + curl "{{ $url }}" > "${download_tmp_file}.ndjson" + curl -s --connect-timeout 60 --max-time 60 -XPOST {{ $savedObjectsUrl }} -H 'kbn-xsrf:true' --form file=@${download_tmp_file}.ndjson + {{- end }} + {{- end }} + {{- if .Values.savedObjects.configmap }} + echo "Searching for dashboard NDJSON files from ConfigMap mounted in /bitnami/kibana/saved-objects" + ndjson_file_list_tmp="$(mktemp)" + find /bitnami/kibana/saved-objects -type f -regex ".*\.ndjson" > $ndjson_file_list_tmp + while read -r f; do + case "$f" in + *.ndjson) + echo "Importing $f" + curl -s --connect-timeout 60 --max-time 60 -XPOST {{ $savedObjectsUrl }} -H 'kbn-xsrf:true' --form file=@${f} + ;; + *) + echo "Ignoring $f" + ;; + esac + done < $ndjson_file_list_tmp + {{- end }} + echo "==> End of Saved objects import" +{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/secret.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/secret.yaml new file mode 100644 index 0000000..3204859 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/secret.yaml @@ -0,0 +1,29 @@ +{{- if (include "kibana.createSecret" .) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.elasticsearch.security.auth.enabled (not .Values.elasticsearch.security.auth.existingSecret) }} + kibana-password: {{ required "A Kibana password is required!" 
.Values.elasticsearch.security.auth.kibanaPassword | b64enc }} + {{- end }} + {{- if and .Values.tls.enabled (not .Values.tls.passwordsSecret) }} + {{- if .Values.tls.keyPassword }} + kibana-key-password: {{ .Values.tls.keyPassword | b64enc | quote }} + {{- end }} + {{- if .Values.tls.keystorePassword }} + kibana-keystore-password: {{ .Values.tls.keystorePassword | b64enc | quote }} + {{- end }} + {{- end }} + {{- if and .Values.elasticsearch.security.tls.enabled .Values.elasticsearch.security.tls.truststorePassword (not .Values.elasticsearch.security.tls.passwordsSecret) }} + elasticsearch-truststore-password: {{ .Values.elasticsearch.security.tls.truststorePassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/service.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/service.yaml new file mode 100644 index 0000000..bf8b87f --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/service.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +{{- if .Values.service.labels }} +{{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} +{{- end }} +{{- if or (and .Values.metrics.enabled .Values.metrics.service.annotations) .Values.service.annotations }} + annotations: + {{- if and .Values.metrics.enabled .Values.metrics.service.annotations }} + {{- tpl (toYaml .Values.metrics.service.annotations) $ | nindent 4 }} + {{- end }} + {{- if .Values.service.annotations }} + {{- tpl (toYaml .Values.service.annotations) $ | nindent 4 }} + {{- end }} +{{- end }} + +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + ports: + - name: http + port: {{ .Values.service.port }} + targetPort: http + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} +{{- if .Values.service.extraPorts }} + {{- tpl (toYaml .Values.service.extraPorts) $ | nindent 4 }} +{{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/serviceaccount.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/serviceaccount.yaml new file mode 100644 index 0000000..b6c5332 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kibana.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + annotations: + {{- if or .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/servicemonitor.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/servicemonitor.yaml new file mode 100644 index 0000000..e419825 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/servicemonitor.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http + path: "/_prometheus/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/tls-secret.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/tls-secret.yaml new file mode 100644 index 0000000..771967a --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/templates/tls-secret.yaml @@ -0,0 +1,72 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: kibana + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "kibana-ingress-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" 
.Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kibana + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +--- +{{- end }} +{{- end }} +{{- if (include "kibana.createTlsSecret" .) }} +{{- $ca := genCA "kibana-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $serviceName }} +{{- $crt := genSignedCert $serviceName nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/values.yaml new file mode 100644 index 0000000..c72bfeb --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/charts/kibana/values.yaml @@ -0,0 +1,577 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+
+## @section Common parameters
+
+## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template with a string (will prepend the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template with a string
+##
+fullnameOverride: ""
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+
+## @section Kibana parameters
+
+## Bitnami Kibana image version
+## ref: https://hub.docker.com/r/bitnami/kibana/tags/
+## @param image.registry Kibana image registry
+## @param image.repository Kibana image repository
+## @param image.tag Kibana image tag (immutable tags are recommended)
+## @param image.pullPolicy Kibana image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+##
+image:
+  registry: docker.io
+  repository: bitnami/kibana
+  tag: 7.17.2-debian-10-r0
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+## @param replicaCount Number of replicas of the Kibana Pod
+##
+replicaCount: 1
+## @param updateStrategy.type Set up update strategy for Kibana installation.
+## Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the pods are destroyed first.
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+## Example:
+## updateStrategy:
+##  type: RollingUpdate
+##  rollingUpdate:
+##    maxSurge: 25%
+##    maxUnavailable: 25%
+##
+updateStrategy:
+  type: RollingUpdate
+## @param schedulerName Alternative scheduler
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param hostAliases Add deployment host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param plugins Array containing the Kibana plugins to be installed in deployment
+## eg:
+## plugins:
+##   - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+##
+plugins: []
+## Saved objects to import (NDJSON format)
+##
+savedObjects:
+  ## @param savedObjects.urls Array containing links to NDJSON files to be imported during Kibana initialization
+  ## e.g:
+  ## urls:
+  ##   - www.example.com/dashboard.ndjson
+  ##
+  urls: []
+  ## @param savedObjects.configmap Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template)
+  ##
+  configmap: ""
+## @param extraConfiguration Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template
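+## e.g. (a brief sketch, not part of the chart's defaults; `server.rewriteBasePath` is the same standard kibana.yml key used by the `configuration` parameter below, and whether nested maps or dotted keys are expected here is an assumption):
+## extraConfiguration:
+##   server:
+##     rewriteBasePath: false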
+##
+extraConfiguration: {}
+## @param configurationCM ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml
+##
+configurationCM: ""
+## @param extraEnvVars Array containing extra env vars to configure Kibana
+## For example:
+## extraEnvVars:
+##   - name: KIBANA_ELASTICSEARCH_URL
+##     value: test
+##
+extraEnvVars: []
+## @param extraEnvVarsCM ConfigMap containing extra env vars to configure Kibana
+##
+extraEnvVarsCM: ""
+## @param extraEnvVarsSecret Secret containing extra env vars to configure Kibana (in case of sensitive data)
+##
+extraEnvVarsSecret: ""
+## @param extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts`
+##
+extraVolumes: []
+## @param extraVolumeMounts Array to add extra mounts. Normally used with `extraVolumes`
+##
+extraVolumeMounts: []
+## Init containers parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work)
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image name
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 10-debian-10-r383
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## @param volumePermissions.resources Volume Permissions resources
+  ## resources:
+  ##   requests:
+  ##     memory: 128Mi
+  ##     cpu: 100m
+  resources: {}
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  ## @param persistence.enabled Enable persistence
+  ##
+  enabled: true
+  ## @param persistence.storageClass Kibana data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ## set, choosing the default provisioner. (gp2 on AWS, standard on
+  ## GKE, AWS & OpenStack)
+  ##
+  storageClass: ""
+  ## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim`
+  ##
+  existingClaim: ""
+  ## @param persistence.accessMode Access mode to the PV
+  ##
+  accessMode: ReadWriteOnce
+  ## @param persistence.size Size for the PV
+  ##
+  size: 10Gi
+## Configure extra options for liveness probe
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+## @param livenessProbe.enabled Enable/disable the Liveness probe
+## @param livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
+## @param livenessProbe.periodSeconds How often to perform the probe
+## @param livenessProbe.timeoutSeconds When the probe times out
+## @param livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
+## @param livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 120
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+## Configure extra options for readiness probe
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+## @param readinessProbe.enabled Enable/disable the Readiness probe
+## @param readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
+## @param readinessProbe.periodSeconds How often to perform the probe
+## @param readinessProbe.timeoutSeconds When the probe times out
+## @param readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
+## @param readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
+##
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+## @param forceInitScripts Force execution of init scripts
+##
+forceInitScripts: false
+## @param initScriptsCM Configmap with init scripts to execute
+##
+initScriptsCM: ""
+## @param initScriptsSecret Secret with init scripts to execute (for sensitive data)
+##
+initScriptsSecret: ""
+## Service configuration
+##
+service:
+  ## @param service.port Kubernetes Service port
+  ##
+  port: 5601
+  ## @param service.type Kubernetes Service type
+  ##
+  type: ClusterIP
+  ## @param service.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  nodePort: ""
+  ## @param service.externalTrafficPolicy Enable client source IP preservation
+  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Annotations for Kibana service (evaluated as a template)
+  ## This can be used to set the LoadBalancer service type to internal only.
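+  ## e.g. (sketch only; the annotation shown is the standard in-tree AWS key and is an assumption here, substitute your cloud provider's equivalent):
+  ## annotations:
+  ##   service.beta.kubernetes.io/aws-load-balancer-internal: "true"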
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  annotations: {}
+  ## @param service.labels Extra labels for Kibana service
+  ##
+  labels: {}
+  ## @param service.loadBalancerIP loadBalancerIP if Kibana service type is `LoadBalancer`
+  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
+## Configure the ingress resource that allows you to access the
+## Kibana installation. Set up the URL
+## ref: https://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## @param ingress.enabled Enable ingress controller resource
+  ##
+  enabled: false
+  ## DEPRECATED: Use ingress.annotations instead of ingress.certManager
+  ## certManager: false
+  ##
+
+  ## @param ingress.pathType Ingress Path type
+  ##
+  pathType: ImplementationSpecific
+  ## @param ingress.apiVersion Override API Version (automatically detected if not set)
+  ##
+  apiVersion: ""
+  ## @param ingress.hostname Default host for the ingress resource. If specified as "*" no host rule is configured
+  ##
+  hostname: kibana.local
+  ## @param ingress.path The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers.
+  ##
+  path: /
+  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ## Use this parameter to set the required annotations for cert-manager, see
+  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+  ##
+  ## e.g:
+  ## annotations:
+  ##   kubernetes.io/ingress.class: nginx
+  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
+  ##
+  annotations: {}
+  ## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
+  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
+  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
+  ##
+  tls: false
+  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+  ##
+  selfSigned: false
+  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
+  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
+  ## extraHosts:
+  ##   - name: kibana.local
+  ##     path: /
+  ##
+  extraHosts: []
+  ## @param ingress.extraPaths Additional arbitrary path/backend objects
+  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
+  ## extraPaths:
+  ##   - path: /*
+  ##     backend:
+  ##       serviceName: ssl-redirect
+  ##       servicePort: use-annotation
+  ##
+  extraPaths: []
+  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+  ## extraTls:
+  ##   - hosts:
+  ##       - kibana.local
+  ##     secretName: kibana.local-tls
+  ##
+  extraTls: []
+  ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
+  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
+  ## -----BEGIN RSA PRIVATE KEY-----
+  ##
+  ## name should line up with a tlsSecret set further up
+  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
+  ##
+  ## It is also possible to create and manage the certificates outside of this helm chart
+  ## Please see README.md for more information
+  ## e.g:
+  ## - name: kibana.local-tls
+  ##   key:
+  ##   certificate:
+  ##
+  secrets: []
+  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
+  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+  ##
+  ingressClassName: ""
+
+## @param serviceAccount.create Enable creation of ServiceAccount for Kibana
+## @param serviceAccount.name Name of serviceAccount
+## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+serviceAccount:
+  create: true
+  name: ""
+  annotations: {}
+
+## @param containerPort Port to expose at container level
+##
+containerPort: 5601
+## @param securityContext.enabled Enable securityContext for the Kibana deployment
+## @param securityContext.fsGroup Group to configure permissions for volumes
+## @param securityContext.runAsUser User for the security context
+## @param securityContext.runAsNonRoot Set container's Security Context runAsNonRoot
+##
+securityContext:
+  enabled: true
+  runAsUser: 1001
+  fsGroup: 1001
+  runAsNonRoot: true
+## Kibana resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## We usually recommend not to specify default resources and to leave this as a conscious
+## choice for the user. This also increases the chances the chart runs on environments with little
+## resources, such as Minikube. If you do want to specify resources, uncomment the following
+## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+## @param resources.limits The resources limits for the container
+## @param resources.requests The requested resources for the container
+##
+resources:
+  ## Example:
+  ## limits:
+  ##    cpu: 100m
+  ##    memory: 256Mi
+  limits: {}
+  ## Examples:
+  ## requests:
+  ##    cpu: 100m
+  ##    memory: 256Mi
+  requests: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+## Allowed values: soft, hard
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param podAnnotations Pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podLabels Extra labels to add to Pod
+##
+podLabels: {}
+## @param sidecars Attach additional containers to the pod
+## e.g.
+## - name: your-image-name
+##   image: your-image
+##   imagePullPolicy: Always
+##   ports:
+##     - name: portname
+##       containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the pod
+## e.g.
+## - name: your-image-name
+##   image: your-image
+##   imagePullPolicy: Always
+##   ports:
+##     - name: portname
+##       containerPort: 1234
+##
+initContainers: []
+## @param configuration [object] Kibana configuration
+##
+configuration:
+  server:
+    basePath: ""
+    rewriteBasePath: false
+## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
+##
+metrics:
+  ## @param metrics.enabled Start a side-car prometheus exporter
+  ##
+  enabled: false
+  service:
+    ## @param metrics.service.annotations [object] Prometheus annotations for the Kibana service
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "80"
+      prometheus.io/path: "_prometheus/metrics"
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ## e.g:
+    ## interval: 10s
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ## e.g:
+    ## scrapeTimeout: 10s
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ## e.g:
+    ## selector:
+    ##   prometheus: my-prometheus
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+    selector: {}
+
+## @section Kibana server TLS configuration
+##
+tls:
+  ## @param tls.enabled Enable SSL/TLS encryption for Kibana server (HTTPS)
+  ##
+  enabled: false
+  ## @param tls.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
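+  ## e.g. (a minimal values-override sketch built only from the parameters in this section):
+  ## tls:
+  ##   enabled: true
+  ##   autoGenerated: true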
+  ##
+  autoGenerated: false
+  ## @param tls.existingSecret Name of the existing secret containing Kibana server certificates
+  ##
+  existingSecret: ""
+  ## @param tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of PKCS12
+  ## Note: Ignored when using autoGenerated certs.
+  ##
+  usePemCerts: false
+  ## @param tls.keyPassword Password to access the PEM key when it is password-protected.
+  ##
+  keyPassword: ""
+  ## @param tls.keystorePassword Password to access the PKCS12 keystore when it is password-protected.
+  ##
+  keystorePassword: ""
+  ## @param tls.passwordsSecret Name of an existing secret containing the Keystore or PEM key password
+  ##
+  passwordsSecret: ""
+
+## @section Elasticsearch parameters
+##
+elasticsearch:
+  ## @param elasticsearch.hosts List of elasticsearch hosts to connect to.
+  ## e.g:
+  ## hosts:
+  ##   - elasticsearch-1
+  ##   - elasticsearch-2
+  ##
+  hosts: []
+  ## @param elasticsearch.port Elasticsearch port
+  ##
+  port: ""
+
+  security:
+    auth:
+      ## @param elasticsearch.security.auth.enabled Set to 'true' if Elasticsearch has authentication enabled
+      ##
+      enabled: false
+      ## @param elasticsearch.security.auth.kibanaUsername Kibana server user to authenticate with Elasticsearch
+      ##
+      kibanaUsername: "elastic"
+      ## @param elasticsearch.security.auth.kibanaPassword Kibana server password to authenticate with Elasticsearch
+      ##
+      kibanaPassword: ""
+      ## @param elasticsearch.security.auth.existingSecret Name of the existing secret containing the Password for the Kibana user
+      ##
+      existingSecret: ""
+    tls:
+      ## @param elasticsearch.security.tls.enabled Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS)
+      ##
+      enabled: false
+      ## @param elasticsearch.security.tls.verificationMode Verification mode for SSL communications.
+      ## Supported values: full, certificate, none.
+      ## Ref: https://www.elastic.co/guide/en/kibana/7.x/settings.html#elasticsearch-ssl-verificationmode
+      verificationMode: "full"
+      ## @param elasticsearch.security.tls.existingSecret Name of the existing secret containing Elasticsearch Truststore or CA certificate. Required unless verificationMode=none
+      ##
+      existingSecret: ""
+      ## @param elasticsearch.security.tls.usePemCerts Set to 'true' to use PEM certificates instead of PKCS12.
+      ##
+      usePemCerts: false
+      ## @param elasticsearch.security.tls.truststorePassword Password to access the PKCS12 truststore in case it is password-protected.
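+      ## e.g. (sketch combining the parameters in this block for a TLS-secured Elasticsearch; the secret name is hypothetical):
+      ## tls:
+      ##   enabled: true
+      ##   existingSecret: elasticsearch-truststore
+      ##   truststorePassword: "change-me"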
+      ##
+      truststorePassword: ""
+      ## @param elasticsearch.security.tls.passwordsSecret Name of an existing secret containing the Truststore password
+      ##
+      passwordsSecret: ""
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/ci/ct-values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/ci/ct-values.yaml
new file mode 100644
index 0000000..bb6c5dd
--- /dev/null
+++ b/deployment/deployment/middleware_deployment/elasticsearch/ci/ct-values.yaml
@@ -0,0 +1,6 @@
+master:
+  replicas: 1
+data:
+  replicas: 1
+coordinating:
+  replicas: 1
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/config/Makefile
deleted file mode 100644
index 9ae9c37..0000000
--- a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-default: test
-
-include ../../../helpers/examples.mk
-
-RELEASE := helm-es-config
-TIMEOUT := 1200s
-
-install:
-	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
-
-secrets:
-	kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
-	kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
-	kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
-	kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
-	kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test
-
-test: secrets install goss
-
-purge:
-	helm del $(RELEASE)
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/config/README.md
deleted file mode 100644
index 675ed96..0000000
--- a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Config
-
-This example deploy a single node Elasticsearch 7.15.0 with authentication and
-custom [values][].
- - -## Usage - -* Create the required secrets: `make secrets` - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/config-master 9200 - curl -u elastic:changeme http://localhost:9200/_cat/indices - ``` - - -## Testing - -You can also run [goss integration tests][] using `make test` - - -[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/config/test/goss.yaml -[values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/config/values.yaml diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/test/goss.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/config/test/goss.yaml deleted file mode 100644 index 455da36..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/test/goss.yaml +++ /dev/null @@ -1,29 +0,0 @@ -http: - http://localhost:9200/_cluster/health: - status: 200 - timeout: 2000 - username: "{{ .Env.ELASTIC_USERNAME }}" - password: "{{ .Env.ELASTIC_PASSWORD }}" - body: - - "green" - - '"number_of_nodes":1' - - '"number_of_data_nodes":1' - - http://localhost:9200: - status: 200 - timeout: 2000 - username: "{{ .Env.ELASTIC_USERNAME }}" - password: "{{ .Env.ELASTIC_PASSWORD }}" - body: - - '"cluster_name" : "config"' - - "You Know, for Search" - -command: - "elasticsearch-keystore list": - exit-status: 0 - stdout: - - keystore.seed - - bootstrap.password - - xpack.notification.slack.account.monitoring.secure_url - - xpack.notification.slack.account.otheraccount.secure_url - - xpack.watcher.encryption_key diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/config/values.yaml deleted file mode 100644 index d417ce8..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/values.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - -clusterName: "config" -replicas: 1 - -extraEnvs: - - name: ELASTIC_PASSWORD - valueFrom: - secretKeyRef: - name: elastic-config-credentials - key: password - - name: ELASTIC_USERNAME - valueFrom: - secretKeyRef: - name: elastic-config-credentials - key: username - -# This is just a dummy file to make sure that -# the keystore can be mounted at the same time -# as a custom elasticsearch.yml -esConfig: - elasticsearch.yml: | - xpack.security.enabled: true - path.data: /usr/share/elasticsearch/data - -keystore: - - secretName: elastic-config-secret - - secretName: elastic-config-slack - - secretName: elastic-config-custom-path - items: - - key: slack_url - path: xpack.notification.slack.account.otheraccount.secure_url diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/watcher_encryption_key b/deployment/deployment/middleware_deployment/elasticsearch/examples/config/watcher_encryption_key deleted file mode 100644 index b5f9078..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/config/watcher_encryption_key +++ /dev/null @@ -1 +0,0 @@ -supersecret diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/default/Makefile deleted file mode 100644 index 389bf99..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/Makefile +++ /dev/null @@ -1,14 +0,0 @@ 
-default: test - -include ../../../helpers/examples.mk - -RELEASE := helm-es-default -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../ - -test: install goss - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/default/README.md deleted file mode 100644 index d54e762..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Default - -This example deploy a 3 nodes Elasticsearch 7.15.0 cluster using -[default values][]. - - -## Usage - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/elasticsearch-master 9200 - curl localhost:9200/_cat/indices - ``` - - -## Testing - -You can also run [goss integration tests][] using `make test` - - -[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/default/test/goss.yaml -[default values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/values.yaml diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/rolling_upgrade.sh b/deployment/deployment/middleware_deployment/elasticsearch/examples/default/rolling_upgrade.sh deleted file mode 100644 index c5a2a88..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/rolling_upgrade.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -x - -kubectl proxy || true & - -make & -PROC_ID=$! - -while kill -0 "$PROC_ID" >/dev/null 2>&1; do - echo "PROCESS IS RUNNING" - if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then - echo "cluster is healthy" - else - echo "cluster not healthy!" 
- exit 1 - fi - sleep 1 -done -echo "PROCESS TERMINATED" -exit 0 diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/test/goss.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/default/test/goss.yaml deleted file mode 100644 index a46d7e0..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/default/test/goss.yaml +++ /dev/null @@ -1,38 +0,0 @@ -kernel-param: - vm.max_map_count: - value: "262144" - -http: - http://elasticsearch-master:9200/_cluster/health: - status: 200 - timeout: 2000 - body: - - "green" - - '"number_of_nodes":3' - - '"number_of_data_nodes":3' - - http://localhost:9200: - status: 200 - timeout: 2000 - body: - - '"number" : "7.15.0"' - - '"cluster_name" : "elasticsearch"' - - "You Know, for Search" - -file: - /usr/share/elasticsearch/data: - exists: true - mode: "2775" - owner: root - group: elasticsearch - filetype: directory - -mount: - /usr/share/elasticsearch/data: - exists: true - -user: - elasticsearch: - exists: true - uid: 1000 - gid: 1000 diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/Makefile deleted file mode 100644 index 18fd053..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -default: test - -RELEASE := helm-es-docker-for-mac -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -test: install - helm test $(RELEASE) - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/README.md deleted file mode 100644 index f16ef87..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Docker for Mac - -This example deploy a 3 nodes Elasticsearch 7.15.0 cluster on [Docker for Mac][] -using [custom values][]. - -Note that this configuration should be used for test only and isn't recommended -for production. - - -## Usage - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/elasticsearch-master 9200 - curl localhost:9200/_cat/indices - ``` - - -[custom values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/docker-for-mac/values.yaml -[docker for mac]: https://docs.docker.com/docker-for-mac/kubernetes/ diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/values.yaml deleted file mode 100644 index f7deba6..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/docker-for-mac/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Permit co-located instances for solitary minikube virtual machines. -antiAffinity: "soft" - -# Shrink default JVM heap. -esJavaOpts: "-Xmx128m -Xms128m" - -# Allocate smaller chunks of memory per pod. -resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "512M" - -# Request smaller persistent volumes. 
-volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "hostpath" - resources: - requests: - storage: 100M diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/Makefile deleted file mode 100644 index 9e5602d..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -default: test - -RELEASE := helm-es-kind -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -install-local-path: - kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values-local-path.yaml $(RELEASE) ../../ - -test: install - helm test $(RELEASE) - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/README.md deleted file mode 100644 index 10ea2f8..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# KIND - -This example deploy a 3 nodes Elasticsearch 7.15.0 cluster on [Kind][] -using [custom values][]. - -Note that this configuration should be used for test only and isn't recommended -for production. - -Note that Kind < 0.7.0 are affected by a [kind issue][] with mount points -created from PVCs not writable by non-root users. [kubernetes-sigs/kind#1157][] -fix it in Kind 0.7.0. - -The workaround for Kind < 0.7.0 is to install manually -[Rancher Local Path Provisioner][] and use `local-path` storage class for -Elasticsearch volumes (see [Makefile][] instructions). - - -## Usage - -* For Kind >= 0.7.0: Deploy Elasticsearch chart with the default values: `make install` -* For Kind < 0.7.0: Deploy Elasticsearch chart with `local-path` storage class: `make install-local-path` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/elasticsearch-master 9200 - curl localhost:9200/_cat/indices - ``` - - -[custom values]: https://github.com/elastic/helm-charts/blob/7.15/elasticsearch/examples/kubernetes-kind/values.yaml -[kind]: https://kind.sigs.k8s.io/ -[kind issue]: https://github.com/kubernetes-sigs/kind/issues/830 -[kubernetes-sigs/kind#1157]: https://github.com/kubernetes-sigs/kind/pull/1157 -[rancher local path provisioner]: https://github.com/rancher/local-path-provisioner -[Makefile]: https://github.com/elastic/helm-charts/blob/7.15/elasticsearch/examples/kubernetes-kind/Makefile#L5 diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values-local-path.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values-local-path.yaml deleted file mode 100644 index 500ad4b..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values-local-path.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Permit co-located instances for solitary minikube virtual machines. -antiAffinity: "soft" - -# Shrink default JVM heap. -esJavaOpts: "-Xmx128m -Xms128m" - -# Allocate smaller chunks of memory per pod. 
-resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "512M" - -# Request smaller persistent volumes. -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "local-path" - resources: - requests: - storage: 100M diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values.yaml deleted file mode 100644 index 500ad4b..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/kubernetes-kind/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Permit co-located instances for solitary minikube virtual machines. -antiAffinity: "soft" - -# Shrink default JVM heap. -esJavaOpts: "-Xmx128m -Xms128m" - -# Allocate smaller chunks of memory per pod. -resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "512M" - -# Request smaller persistent volumes. -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "local-path" - resources: - requests: - storage: 100M diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/Makefile deleted file mode 100644 index 2d0012d..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -default: test - -RELEASE := helm-es-microk8s -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -test: install - helm test $(RELEASE) - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/README.md deleted file mode 100644 index f8502f1..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# MicroK8S - -This example deploy a 3 nodes Elasticsearch 7.15.0 cluster on [MicroK8S][] -using [custom values][]. - -Note that this configuration should be used for test only and isn't recommended -for production. - - -## Requirements - -The following MicroK8S [addons][] need to be enabled: -- `dns` -- `helm` -- `storage` - - -## Usage - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/elasticsearch-master 9200 - curl localhost:9200/_cat/indices - ``` - - -[addons]: https://microk8s.io/docs/addons -[custom values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/microk8s/values.yaml -[MicroK8S]: https://microk8s.io diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/values.yaml deleted file mode 100644 index 2627ecb..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/microk8s/values.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# Disable privileged init Container creation. -sysctlInitContainer: - enabled: false - -# Restrict the use of the memory-mapping when sysctlInitContainer is disabled. -esConfig: - elasticsearch.yml: | - node.store.allow_mmap: false - -# Permit co-located instances for solitary minikube virtual machines. 
-antiAffinity: "soft" - -# Shrink default JVM heap. -esJavaOpts: "-Xmx128m -Xms128m" - -# Allocate smaller chunks of memory per pod. -resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "512M" - -# Request smaller persistent volumes. -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "microk8s-hostpath" - resources: - requests: - storage: 100M diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/Makefile deleted file mode 100644 index 020906f..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -PREFIX := helm-es-migration - -data: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../ - -master: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../ - -client: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../ diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/README.md deleted file mode 100644 index 75425d5..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# Migration Guide from helm/charts - -There are two viable options for migrating from the community Elasticsearch Helm -chart from the [helm/charts][] repo. - -1. Restoring from Snapshot to a fresh cluster -2. Live migration by joining a new cluster to the existing cluster. - -## Restoring from Snapshot - -This is the recommended and preferred option. The downside is that it will -involve a period of write downtime during the migration. If you have a way to -temporarily stop writes to your cluster then this is the way to go. This is also -a lot simpler as it just involves launching a fresh cluster and restoring a -snapshot following the [restoring to a different cluster guide][]. - -## Live migration - -If restoring from a snapshot is not possible due to the write downtime then a -live migration is also possible. It is very important to first test this in a -testing environment to make sure you are comfortable with the process and fully -understand what is happening. - -This process will involve joining a new set of master, data and client nodes to -an existing cluster that has been deployed using the [helm/charts][] community -chart. Nodes will then be replaced one by one in a controlled fashion to -decommission the old cluster. - -This example will be using the default values for the existing helm/charts -release and for the Elastic helm-charts release. If you have changed any of the -default values then you will need to first make sure that your values are -configured in a compatible way before starting the migration. - -The process will involve a re-sync and a rolling restart of all of your data -nodes. Therefore it is important to disable shard allocation and perform a synced -flush like you normally would during any other rolling upgrade. See the -[rolling upgrades guide][] for more information. - -* The default image for this chart is -`docker.elastic.co/elasticsearch/elasticsearch` which contains the default -distribution of Elasticsearch with a [basic license][]. 
Make sure to update the -`image` and `imageTag` values to the correct Docker image and Elasticsearch -version that you currently have deployed. - -* Convert your current helm/charts configuration into something that is -compatible with this chart. - -* Take a fresh snapshot of your cluster. If something goes wrong you want to be -able to restore your data no matter what. - -* Check that your clusters health is green. If not abort and make sure your -cluster is healthy before continuing: - - ``` - curl localhost:9200/_cluster/health - ``` - -* Deploy new data nodes which will join the existing cluster. Take a look at the -configuration in [data.yaml][]: - - ``` - make data - ``` - -* Check that the new nodes have joined the cluster (run this and any other curl -commands from within one of your pods): - - ``` - curl localhost:9200/_cat/nodes - ``` - -* Check that your cluster is still green. If so we can now start to scale down -the existing data nodes. Assuming you have the default amount of data nodes (2) -we now want to scale it down to 1: - - ``` - kubectl scale statefulsets my-release-elasticsearch-data --replicas=1 - ``` - -* Wait for your cluster to become green again: - - ``` - watch 'curl -s localhost:9200/_cluster/health' - ``` - -* Once the cluster is green we can scale down again: - - ``` - kubectl scale statefulsets my-release-elasticsearch-data --replicas=0 - ``` - -* Wait for the cluster to be green again. -* OK. We now have all data nodes running in the new cluster. Time to replace the -masters by firstly scaling down the masters from 3 to 2. Between each step make -sure to wait for the cluster to become green again, and check with -`curl localhost:9200/_cat/nodes` that you see the correct amount of master -nodes. During this process we will always make sure to keep at least 2 master -nodes as to not lose quorum: - - ``` - kubectl scale statefulsets my-release-elasticsearch-master --replicas=2 - ``` - -* Now deploy a single new master so that we have 3 masters again. See -[master.yaml][] for the configuration: - - ``` - make master - ``` - -* Scale down old masters to 1: - - ``` - kubectl scale statefulsets my-release-elasticsearch-master --replicas=1 - ``` - -* Edit the masters in [masters.yaml][] to 2 and redeploy: - - ``` - make master - ``` - -* Scale down the old masters to 0: - - ``` - kubectl scale statefulsets my-release-elasticsearch-master --replicas=0 - ``` - -* Edit the [masters.yaml][] to have 3 replicas and remove the -`discovery.zen.ping.unicast.hosts` entry from `extraEnvs` then redeploy the -masters. This will make sure all 3 masters are running in the new cluster and -are pointing at each other for discovery: - - ``` - make master - ``` - -* Remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs` then -redeploy the data nodes to make sure they are pointing at the new masters: - - ``` - make data - ``` - -* Deploy the client nodes: - - ``` - make client - ``` - -* Update any processes that are talking to the existing client nodes and point -them to the new client nodes. Once this is done you can scale down the old -client nodes: - - ``` - kubectl scale deployment my-release-elasticsearch-client --replicas=0 - ``` - -* The migration should now be complete. After verifying that everything is -working correctly you can cleanup leftover resources from your old cluster. 
- -[basic license]: https://www.elastic.co/subscriptions -[data.yaml]: https://github.com/elastic/helm-charts/blob/7.15/elasticsearch/examples/migration/data.yaml -[helm/charts]: https://github.com/helm/charts/tree/7.15/stable/elasticsearch -[master.yaml]: https://github.com/elastic/helm-charts/blob/7.15/elasticsearch/examples/migration/master.yaml -[restoring to a different cluster guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-snapshots.html#_restoring_to_a_different_cluster -[rolling upgrades guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/client.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/client.yaml deleted file mode 100644 index 30ee700..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/client.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - -replicas: 2 - -clusterName: "elasticsearch" -nodeGroup: "client" - -esMajorVersion: 6 - -roles: - master: "false" - ingest: "false" - data: "false" - -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "standard" - resources: - requests: - storage: 1Gi # Currently needed till pvcs are made optional - -persistence: - enabled: false diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/data.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/data.yaml deleted file mode 100644 index eedcbb0..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/data.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -replicas: 2 - -esMajorVersion: 6 - -extraEnvs: - - name: discovery.zen.ping.unicast.hosts - value: "my-release-elasticsearch-discovery" - -clusterName: "elasticsearch" -nodeGroup: "data" - -roles: - master: "false" - ingest: "false" - data: "true" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/master.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/master.yaml deleted file mode 100644 index 3e3a2f1..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/migration/master.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - -# Temporarily set to 3 so we can scale up/down the old a new cluster -# one at a time whilst always keeping 3 masters running -replicas: 1 - -esMajorVersion: 6 - -extraEnvs: - - name: discovery.zen.ping.unicast.hosts - value: "my-release-elasticsearch-discovery" - -clusterName: "elasticsearch" -nodeGroup: "master" - -roles: - master: "true" - ingest: "false" - data: "false" - -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "standard" - resources: - requests: - storage: 4Gi diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/Makefile deleted file mode 100644 index 1021d98..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -default: test - -RELEASE := helm-es-minikube -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -test: install - helm test $(RELEASE) - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/README.md 
b/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/README.md deleted file mode 100644 index 872fa1d..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Minikube - -This example deploy a 3 nodes Elasticsearch 7.15.0 cluster on [Minikube][] -using [custom values][]. - -If helm or kubectl timeouts occur, you may consider creating a minikube VM with -more CPU cores or memory allocated. - -Note that this configuration should be used for test only and isn't recommended -for production. - - -## Requirements - -In order to properly support the required persistent volume claims for the -Elasticsearch StatefulSet, the `default-storageclass` and `storage-provisioner` -minikube addons must be enabled. - -``` -minikube addons enable default-storageclass -minikube addons enable storage-provisioner -``` - - -## Usage - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/elasticsearch-master 9200 - curl localhost:9200/_cat/indices - ``` - - -[custom values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/minikube/values.yaml -[minikube]: https://minikube.sigs.k8s.io/docs/ diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/values.yaml deleted file mode 100644 index ccceb3a..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/minikube/values.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Permit co-located instances for solitary minikube virtual machines. -antiAffinity: "soft" - -# Shrink default JVM heap. -esJavaOpts: "-Xmx128m -Xms128m" - -# Allocate smaller chunks of memory per pod. -resources: - requests: - cpu: "100m" - memory: "512M" - limits: - cpu: "1000m" - memory: "512M" - -# Request smaller persistent volumes. 
-volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "standard" - resources: - requests: - storage: 100M diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/Makefile deleted file mode 100644 index 243e504..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -default: test - -include ../../../helpers/examples.mk - -PREFIX := helm-es-multi -RELEASE := helm-es-multi-master -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../ - helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../ - helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../ - -test: install goss - -purge: - helm del $(PREFIX)-master - helm del $(PREFIX)-data - helm del $(PREFIX)-client diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/README.md deleted file mode 100644 index dbb0614..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Multi - -This example deploy an Elasticsearch 7.15.0 cluster composed of 3 different Helm -releases: - -- `helm-es-multi-master` for the 3 master nodes using [master values][] -- `helm-es-multi-data` for the 3 data nodes using [data values][] -- `helm-es-multi-client` for the 3 client nodes using [client values][] - -## Usage - -* Deploy the 3 Elasticsearch releases: `make install` - -* You can now setup a port forward to query Elasticsearch API: - - ``` - kubectl port-forward svc/multi-master 9200 - curl -u elastic:changeme http://localhost:9200/_cat/indices - ``` - -## Testing - -You can also run [goss integration tests][] using `make test` - - -[client values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/multi/client.yaml -[data values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/multi/data.yaml -[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/multi/test/goss.yaml -[master values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/multi/master.yaml diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/client.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/client.yaml deleted file mode 100644 index dbe5b05..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/client.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -clusterName: "multi" -nodeGroup: "client" - -roles: - master: "false" - ingest: "false" - data: "false" - ml: "false" - remote_cluster_client: "false" - -persistence: - enabled: false diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/data.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/data.yaml deleted file mode 100644 index 2e3a909..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/data.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -clusterName: "multi" -nodeGroup: "data" - -roles: - master: "false" - ingest: "true" - data: "true" - ml: "false" - remote_cluster_client: "false" diff --git 
a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/master.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/master.yaml deleted file mode 100644 index 6b8c082..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/master.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -clusterName: "multi" -nodeGroup: "master" - -roles: - master: "true" - ingest: "false" - data: "false" - ml: "false" - remote_cluster_client: "false" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/test/goss.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/test/goss.yaml deleted file mode 100644 index 794416b..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/multi/test/goss.yaml +++ /dev/null @@ -1,9 +0,0 @@ -http: - http://localhost:9200/_cluster/health: - status: 200 - timeout: 2000 - body: - - 'green' - - '"cluster_name":"multi"' - - '"number_of_nodes":9' - - '"number_of_data_nodes":3' diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/Makefile deleted file mode 100644 index e7b20c5..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -default: test - -include ../../../helpers/examples.mk - -RELEASE := helm-es-networkpolicy -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -test: install goss - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/values.yaml deleted file mode 100644 index 1963d20..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/networkpolicy/values.yaml +++ /dev/null @@ -1,37 +0,0 @@ -networkPolicy: - http: - enabled: true - explicitNamespacesSelector: - # Accept from namespaces with all those different rules (from whitelisted Pods) - matchLabels: - role: frontend-http - matchExpressions: - - {key: role, operator: In, values: [frontend-http]} - additionalRules: - - podSelector: - matchLabels: - role: frontend-http - - podSelector: - matchExpressions: - - key: role - operator: In - values: - - frontend-http - transport: - enabled: true - allowExternal: true - explicitNamespacesSelector: - matchLabels: - role: frontend-transport - matchExpressions: - - {key: role, operator: In, values: [frontend-transport]} - additionalRules: - - podSelector: - matchLabels: - role: frontend-transport - - podSelector: - matchExpressions: - - key: role - operator: In - values: - - frontend-transport diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/Makefile deleted file mode 100644 index 078c33c..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -default: test - -include ../../../helpers/examples.mk - -RELEASE := elasticsearch - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -test: install goss - -purge: - helm del $(RELEASE) diff --git 
a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/README.md deleted file mode 100644 index 58a3847..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# OpenShift - -This example deploys a 3-node Elasticsearch 7.15.0 cluster on [OpenShift][] -using [custom values][]. - -## Usage - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now set up a port forward to query the Elasticsearch API: - - ``` - kubectl port-forward svc/elasticsearch-master 9200 - curl localhost:9200/_cat/indices - ``` - -## Testing - -You can also run [goss integration tests][] using `make test` - - -[custom values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/openshift/values.yaml -[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/openshift/test/goss.yaml -[openshift]: https://www.openshift.com/ diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/test/goss.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/test/goss.yaml deleted file mode 100644 index cbae8d9..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/test/goss.yaml +++ /dev/null @@ -1,16 +0,0 @@ -http: - http://localhost:9200/_cluster/health: - status: 200 - timeout: 2000 - body: - - "green" - - '"number_of_nodes":3' - - '"number_of_data_nodes":3' - - http://localhost:9200: - status: 200 - timeout: 2000 - body: - - '"number" : "7.15.0"' - - '"cluster_name" : "elasticsearch"' - - "You Know, for Search" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/values.yaml deleted file mode 100644 index 8a21126..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/openshift/values.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -securityContext: - runAsUser: null - -podSecurityContext: - fsGroup: null - runAsUser: null - -sysctlInitContainer: - enabled: false diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/security/Makefile deleted file mode 100644 index beddbef..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -default: test - -include ../../../helpers/examples.mk - -RELEASE := helm-es-security -ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) -TIMEOUT := 1200s - -install: - helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ - -test: secrets install goss - -purge: - kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem elastic-certificate-crt || true - helm del $(RELEASE) - -pull-elasticsearch-image: - docker pull $(ELASTICSEARCH_IMAGE) - -secrets: - docker rm -f elastic-helm-charts-certs || true - rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 || true - password=$$([ !
-z "$$ELASTIC_PASSWORD" ] && echo $$ELASTIC_PASSWORD || echo $$(docker run --rm busybox:1.31.1 /bin/sh -c "< /dev/urandom tr -cd '[:alnum:]' | head -c20")) && \ - docker run --name elastic-helm-charts-certs -i -w /app \ - $(ELASTICSEARCH_IMAGE) \ - /bin/sh -c " \ - elasticsearch-certutil ca --out /app/elastic-stack-ca.p12 --pass '' && \ - elasticsearch-certutil cert --name security-master --dns security-master --ca /app/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /app/elastic-certificates.p12" && \ - docker cp elastic-helm-charts-certs:/app/elastic-certificates.p12 ./ && \ - docker rm -f elastic-helm-charts-certs && \ - openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \ - openssl x509 -outform der -in elastic-certificate.pem -out elastic-certificate.crt && \ - kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \ - kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \ - kubectl create secret generic elastic-certificate-crt --from-file=elastic-certificate.crt && \ - kubectl create secret generic elastic-credentials --from-literal=password=$$password --from-literal=username=elastic && \ - rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/security/README.md deleted file mode 100644 index a3d5643..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Security - -This example deploy a 3 nodes Elasticsearch 7.15.0 with authentication and -autogenerated certificates for TLS (see [values][]). - -Note that this configuration should be used for test only. For a production -deployment you should generate SSL certificates following the [official docs][]. 
- -## Usage - -* Create the required secrets: `make secrets` - -* Deploy Elasticsearch chart with the default values: `make install` - -* You can now set up a port forward to query the Elasticsearch API: - - ``` - kubectl port-forward svc/security-master 9200 - curl -u elastic:changeme https://localhost:9200/_cat/indices - ``` - -## Testing - -You can also run [goss integration tests][] using `make test` - - -[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/security/test/goss.yaml -[official docs]: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/configuring-tls.html#node-certificates -[values]: https://github.com/elastic/helm-charts/tree/7.15/elasticsearch/examples/security/values.yaml diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/test/goss.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/security/test/goss.yaml deleted file mode 100644 index 6bb2243..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/test/goss.yaml +++ /dev/null @@ -1,44 +0,0 @@ -http: - https://security-master:9200/_cluster/health: - status: 200 - timeout: 2000 - allow-insecure: true - username: "{{ .Env.ELASTIC_USERNAME }}" - password: "{{ .Env.ELASTIC_PASSWORD }}" - body: - - "green" - - '"number_of_nodes":3' - - '"number_of_data_nodes":3' - - https://localhost:9200/: - status: 200 - timeout: 2000 - allow-insecure: true - username: "{{ .Env.ELASTIC_USERNAME }}" - password: "{{ .Env.ELASTIC_PASSWORD }}" - body: - - '"cluster_name" : "security"' - - "You Know, for Search" - - https://localhost:9200/_xpack/license: - status: 200 - timeout: 2000 - allow-insecure: true - username: "{{ .Env.ELASTIC_USERNAME }}" - password: "{{ .Env.ELASTIC_PASSWORD }}" - body: - - "active" - - "basic" - -file: - /usr/share/elasticsearch/config/elasticsearch.yml: - exists: true - contains: - - "xpack.security.enabled: true" - - "xpack.security.transport.ssl.enabled: true" - - "xpack.security.transport.ssl.verification_mode: certificate" - - "xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" - - "xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" - - "xpack.security.http.ssl.enabled: true" - - "xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" - - "xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/security/values.yaml deleted file mode 100644 index 04d932c..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/security/values.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -clusterName: "security" -nodeGroup: "master" - -roles: - master: "true" - ingest: "true" - data: "true" - -protocol: https - -esConfig: - elasticsearch.yml: | - xpack.security.enabled: true - xpack.security.transport.ssl.enabled: true - xpack.security.transport.ssl.verification_mode: certificate - xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 - xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 - xpack.security.http.ssl.enabled: true - xpack.security.http.ssl.truststore.path:
/usr/share/elasticsearch/config/certs/elastic-certificates.p12 - xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 - -extraEnvs: - - name: ELASTIC_PASSWORD - valueFrom: - secretKeyRef: - name: elastic-credentials - key: password - - name: ELASTIC_USERNAME - valueFrom: - secretKeyRef: - name: elastic-credentials - key: username - -secretMounts: - - name: elastic-certificates - secretName: elastic-certificates - path: /usr/share/elasticsearch/config/certs diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/Makefile b/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/Makefile deleted file mode 100644 index 9251d3b..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -default: test - -include ../../../helpers/examples.mk - -CHART := elasticsearch -RELEASE := helm-es-upgrade -FROM := 7.4.0 # versions before 7.4.0 aren't compatible with Kubernetes >= 1.16.0 - -install: - ../../../helpers/upgrade.sh --chart $(CHART) --release $(RELEASE) --from $(FROM) - kubectl rollout status statefulset upgrade-master - -test: install goss - -purge: - helm del $(RELEASE) diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/README.md b/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/README.md deleted file mode 100644 index 85977f5..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Upgrade - -This example will deploy a 3-node Elasticsearch cluster chart using an old chart -version, then upgrade it. - - -## Usage - -* Deploy and upgrade Elasticsearch chart with the default values: `make install` - - -## Testing - -You can also run [goss integration tests][] using `make test`. - - -[goss integration tests]: https://github.com/elastic/helm-charts/tree/master/elasticsearch/examples/upgrade/test/goss.yaml diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/scripts/upgrade.sh b/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/scripts/upgrade.sh deleted file mode 100644 index 59853e0..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/scripts/upgrade.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -usage() { - cat <<-EOF - USAGE: - $0 [--release <release-name>] [--from <elasticsearch-version>] - $0 --help - - OPTIONS: - --release <release-name> - Name of the Helm release to install - --from <elasticsearch-version> - Elasticsearch version to use for first install - EOF - exit 1 -} - -RELEASE="helm-es-upgrade" -FROM="" - -while [[ $# -gt 0 ]] -do - key="$1" - - case $key in - --help) - usage - ;; - --release) - RELEASE="$2" - shift 2 - ;; - --from) - FROM="$2" - shift 2 - ;; - *) - echo "Unrecognized argument: '$key'" - usage - ;; - esac -done - -if !
command -v jq > /dev/null -then - echo 'jq is required to use this script' - echo 'please check https://stedolan.github.io/jq/download/ to install it' - exit 1 -fi - -# Elasticsearch charts < 7.4.0 are not compatible with K8S >= 1.16 -if [[ -z $FROM ]] -then - KUBE_MINOR_VERSION=$(kubectl version -o json | jq --raw-output --exit-status '.serverVersion.minor' | sed 's/[^0-9]*//g') - - if [ "$KUBE_MINOR_VERSION" -lt 16 ] - then - FROM="7.0.0-alpha1" - else - FROM="7.4.0" - fi -fi - -helm repo add elastic https://helm.elastic.co - -# Initial install -printf "Installing Elasticsearch chart %s\n" "$FROM" -helm upgrade --wait --timeout=600s --install "$RELEASE" elastic/elasticsearch --version "$FROM" --set clusterName=upgrade -kubectl rollout status sts/upgrade-master --timeout=600s - -# Upgrade -printf "Upgrading Elasticsearch chart\n" -helm upgrade --wait --timeout=600s --set terminationGracePeriod=121 --install "$RELEASE" ../../ --set clusterName=upgrade -kubectl rollout status sts/upgrade-master --timeout=600s diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/test/goss.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/test/goss.yaml deleted file mode 100644 index d035483..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/test/goss.yaml +++ /dev/null @@ -1,16 +0,0 @@ -http: - http://localhost:9200/_cluster/health: - status: 200 - timeout: 2000 - body: - - "green" - - '"number_of_nodes":3' - - '"number_of_data_nodes":3' - - http://localhost:9200: - status: 200 - timeout: 2000 - body: - - '"number" : "7.15.0"' - - '"cluster_name" : "upgrade"' - - "You Know, for Search" diff --git a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/values.yaml deleted file mode 100644 index de0283a..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/examples/upgrade/values.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -clusterName: upgrade diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/NOTES.txt b/deployment/deployment/middleware_deployment/elasticsearch/templates/NOTES.txt index 88b5dd5..a43fd39 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/NOTES.txt +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/NOTES.txt @@ -1,6 +1,132 @@ -1. Watch all cluster members come up. - $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w -{{- if .Values.tests.enabled -}} -2. Test cluster health using Helm test. - $ helm --namespace={{ .Release.Namespace }} test {{ .Release.Name }} -{{- end -}} +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if contains "LoadBalancer" .Values.coordinating.service.type }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "coordinating.service.type=LoadBalancer" you have most likely + exposed the Elasticsearch service externally. + + Please note that Elasticsearch does not implement an authentication + mechanism to secure your cluster. For security reasons, we strongly + suggest that you switch to "ClusterIP" or "NodePort".
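+
+  For example (illustrative), you can switch the service type in place with:
+
+      helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/elasticsearch \
+        --set coordinating.service.type=ClusterIP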
+------------------------------------------------------------------------------- +{{- end }} +{{- if not .Values.sysctlImage.enabled }} + +------------------------------------------------------------------------------- + WARNING + + Elasticsearch requires some changes in the kernel of the host machine to + work as expected. If those values are not set in the underlying operating + system, the ES containers fail to boot with ERROR messages. + + To check whether the host machine meets the requirements, run the command + below: + + kubectl logs --namespace {{ .Release.Namespace }} $(kubectl get --namespace {{ .Release.Namespace }} \ + pods -l app={{ template "common.names.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \ + elasticsearch + + You can adapt the Kernel parameters on your cluster as described in the + official documentation: + + https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster + + As an alternative, you can specify "sysctlImage.enabled=true" to use a + privileged initContainer to change those settings in the Kernel: + + helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/elasticsearch --set sysctlImage.enabled=true + + Note that this requires the ability to run privileged containers, which is likely not + the case on many secure clusters. To cover this use case, you can also set some parameters + in the config file to customize the default settings: + + https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html + https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html + + For that, you can place the desired parameters by using the "config" block present in the values.yaml + +{{- else if .Values.sysctlImage.enabled }} + +------------------------------------------------------------------------------- + WARNING + + Elasticsearch requires some changes in the kernel of the host machine to + work as expected. If those values are not set in the underlying operating + system, the ES containers fail to boot with ERROR messages. + + More information about these requirements can be found in the links below: + + https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html + https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + + This chart uses a privileged initContainer to change those settings in the Kernel + by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536 + +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing: + + kubectl exec --namespace {{ .Release.Namespace }} -ti <name-of-the-pod> -- bash + +In order to replicate the container startup scripts, execute this command: + + /opt/bitnami/scripts/elasticsearch/entrypoint.sh /opt/bitnami/scripts/elasticsearch/run.sh + +{{- else }} + +{{- if .Values.curator.enabled }} + + A CronJob will run with schedule {{ .Values.curator.cronjob.schedule }}.
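+
+  As an illustrative sketch (the schedule value is an example, not the chart default), curator can be enabled at deploy time with:
+
+      helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/elasticsearch \
+        --set curator.enabled=true --set curator.cronjob.schedule="0 1 * * *"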
+ + The Jobs will not be removed automagically when deleting this Helm chart. + To remove these jobs, run the following: + + kubectl --namespace {{ .Release.Namespace }} delete job -l app={{ template "common.names.name" . }},role=curator + +{{- end }} + + Elasticsearch can be accessed within the cluster on port {{ .Values.coordinating.service.port }} at {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + To access from outside the cluster, execute the following commands: + +{{- if contains "NodePort" .Values.coordinating.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.coordinating.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + curl http://$NODE_IP:$NODE_PORT/ +{{- else if contains "LoadBalancer" .Values.coordinating.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "elasticsearch.coordinating.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch.coordinating.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + curl http://$SERVICE_IP:{{ .Values.coordinating.service.port }}/ +{{- else if contains "ClusterIP" .Values.coordinating.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "elasticsearch.coordinating.fullname" . }} {{ .Values.coordinating.service.port }}:9200 & + curl http://127.0.0.1:9200/ +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.sysctlImage }} + +{{- end }} +{{ include "elasticsearch.validateValues" . }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/_helpers.tpl b/deployment/deployment/middleware_deployment/elasticsearch/templates/_helpers.tpl index d373f2a..31e537d 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/_helpers.tpl +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/_helpers.tpl @@ -1,65 +1,507 @@ {{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ES image name +*/}} +{{- define "elasticsearch.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + + +{{/* +Create a default fully qualified master name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.master.fullname" -}} +{{- if .Values.master.fullnameOverride -}} +{{- .Values.master.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.master.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified ingest name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}} +{{- define "elasticsearch.ingest.fullname" -}} +{{- if .Values.ingest.fullnameOverride -}} +{{- .Values.ingest.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.ingest.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified coordinating name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.coordinating.fullname" -}} +{{- if .Values.global.kibanaEnabled -}} +{{- printf "%s-%s" .Release.Name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if .Values.coordinating -}} +{{- if .Values.coordinating.fullnameOverride -}} +{{- .Values.coordinating.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the hostname of every ElasticSearch seed node +*/}} +{{- define "elasticsearch.hosts" -}} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $releaseNamespace := .Release.Namespace }} +{{- if gt (.Values.master.replicas | int) 0 }} +{{- $masterFullname := include "elasticsearch.master.fullname" . }} +{{- $masterFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if gt (.Values.coordinating.replicas | int) 0 }} +{{- $coordinatingFullname := include "elasticsearch.coordinating.fullname" . }} +{{- $coordinatingFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if gt (.Values.data.replicas | int) 0 }} +{{- $dataFullname := include "elasticsearch.data.fullname" . }} +{{- $dataFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if and (eq .Values.ingest.enabled true) (gt (.Values.ingest.replicas | int) 0) }} +{{- $ingestFullname := include "elasticsearch.ingest.fullname" . }} +{{- $ingestFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified data name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.data.fullname" -}} +{{- if .Values.data.fullnameOverride -}} +{{- .Values.data.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.data.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{ template "elasticsearch.initScriptsSecret" . }} +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "elasticsearch.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{ template "elasticsearch.initScriptsCM" . }} +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "elasticsearch.initScriptsCM" -}} +{{- printf "%s" .Values.initScriptsCM -}} +{{- end -}} + +{{ template "elasticsearch.initScriptsSecret" . }} +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "elasticsearch.initScriptsSecret" -}} +{{- printf "%s" .Values.initScriptsSecret -}} +{{- end -}} + +{{/* + Create the name of the master service account to use + */}} +{{- define "elasticsearch.master.serviceAccountName" -}} +{{- if .Values.master.serviceAccount.create -}} + {{ default (include "elasticsearch.master.fullname" .) 
.Values.master.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.master.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the coordinating-only service account to use + */}} +{{- define "elasticsearch.coordinating.serviceAccountName" -}} +{{- if .Values.coordinating.serviceAccount.create -}} + {{ default (include "elasticsearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.coordinating.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the data service account to use + */}} +{{- define "elasticsearch.data.serviceAccountName" -}} +{{- if .Values.data.serviceAccount.create -}} + {{ default (include "elasticsearch.data.fullname" .) .Values.data.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.data.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the ingest service account to use + */}} +{{- define "elasticsearch.ingest.serviceAccountName" -}} +{{- if .Values.ingest.serviceAccount.create -}} + {{ default (include "elasticsearch.ingest.fullname" .) .Values.ingest.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.ingest.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified metrics name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.metrics.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.metrics.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper ES exporter image name +*/}} +{{- define "elasticsearch.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper sysctl image name +*/}} +{{- define "elasticsearch.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctlImage "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "elasticsearch.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.curator.image .Values.sysctlImage .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "elasticsearch.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + {{/* -Expand the name of the chart. +Return the proper Storage Class +Usage: +{{ include "elasticsearch.storageClass" (dict "global" .Values.global "local" .Values.master) }} */}} -{{- define "elasticsearch.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- define "elasticsearch.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
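+For illustration (hypothetical values), the precedence implemented below works out as:
+  global.storageClass=standard          -> renders "storageClassName: standard" (global wins over local)
+  master.persistence.storageClass="-"   -> renders storageClassName: "" (disable dynamic provisioning)
+  neither set                           -> renders nothing, so the cluster default StorageClass applies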
+*/}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- if (eq "-" .global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .local.persistence.storageClass -}} + {{- if (eq "-" .local.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .local.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .local.persistence.storageClass -}} + {{- if (eq "-" .local.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .local.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob APIs. +*/}} +{{- define "cronjob.apiVersion" -}} +{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "batch/v2alpha1" }} +{{- else if and (semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion) (semverCompare "< 1.21-0" .Capabilities.KubeVersion.GitVersion) -}} +{{- print "batch/v1beta1" }} +{{- else if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "batch/v1" }} +{{- end -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. */}} -{{- define "elasticsearch.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- define "elasticsearch.curator.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.curator.name | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{- define "elasticsearch.uname" -}} -{{- if empty .Values.fullnameOverride -}} -{{- if empty .Values.nameOverride -}} -{{ .Values.clusterName }}-{{ .Values.nodeGroup }} +{{/* +Create the name of the service account to use +*/}} +{{- define "elasticsearch.curator.serviceAccountName" -}} +{{- if .Values.curator.serviceAccount.create -}} + {{ default (include "elasticsearch.curator.fullname" .) .Values.curator.serviceAccount.name }} {{- else -}} -{{ .Values.nameOverride }}-{{ .Values.nodeGroup }} + {{ default "default" .Values.curator.serviceAccount.name }} +{{- end -}} {{- end -}} + +{{/* +Return the proper ES curator image name +*/}} +{{- define "elasticsearch.curator.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.curator.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for master nodes. +*/}} +{{- define "elasticsearch.master.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.master.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} {{- else -}} -{{ .Values.fullnameOverride }} + {{- printf "%s-crt" (include "elasticsearch.master.fullname" .) -}} {{- end -}} {{- end -}} -{{- define "elasticsearch.masterService" -}} -{{- if empty .Values.masterService -}} -{{- if empty .Values.fullnameOverride -}} -{{- if empty .Values.nameOverride -}} -{{ .Values.clusterName }}-master +{{/* +Return the elasticsearch TLS credentials secret for data nodes. 
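+For example (hypothetical names): with security.tls.data.existingSecret unset and a computed data
+fullname of "es-data", the helper below falls back to the autogenerated secret "es-data-crt".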
+*/}} +{{- define "elasticsearch.data.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.data.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} {{- else -}} -{{ .Values.nameOverride }}-master + {{- printf "%s-crt" (include "elasticsearch.data.fullname" .) -}} +{{- end -}} {{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for ingest nodes. +*/}} +{{- define "elasticsearch.ingest.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.ingest.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} {{- else -}} -{{ .Values.fullnameOverride }} + {{- printf "%s-crt" (include "elasticsearch.ingest.fullname" .) -}} +{{- end -}} {{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for coordinating-only nodes. +*/}} +{{- define "elasticsearch.coordinating.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.coordinating.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} {{- else -}} -{{ .Values.masterService }} + {{- printf "%s-crt" (include "elasticsearch.coordinating.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "elasticsearch.createTlsSecret" -}} +{{- if and .Values.security.enabled .Values.security.tls.autoGenerated (not (include "elasticsearch.security.tlsSecretsProvided" .)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if an authentication credentials secret object should be created +*/}} +{{- define "elasticsearch.createSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Elasticsearch authentication credentials secret name +*/}} +{{- define "elasticsearch.secretName" -}} +{{- coalesce .Values.security.existingSecret (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Return true if a TLS password secret object should be created +*/}} +{{- define "elasticsearch.createTlsPasswordsSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.passwordsSecret) (or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword ) }} + {{- true -}} {{- end -}} {{- end -}} -{{- define "elasticsearch.endpoints" -}} -{{- $replicas := int (toString (.Values.replicas)) }} -{{- $uname := (include "elasticsearch.uname" .) }} - {{- range $i, $e := untilStep 0 $replicas 1 -}} -{{ $uname }}-{{ $i }}, - {{- end -}} +{{/* +Return the Elasticsearch TLS password secret name +*/}} +{{- define "elasticsearch.tlsPasswordsSecret" -}} +{{- coalesce .Values.security.tls.passwordsSecret (printf "%s-tls-pass" (include "common.names.fullname" .)) -}} {{- end -}} -{{- define "elasticsearch.esMajorVersion" -}} -{{- if .Values.esMajorVersion -}} -{{ .Values.esMajorVersion }} +{{/* +Return the name of the http port. 
Whether or not security is enabled: http or https +*/}} +{{- define "elasticsearch.httpPortName" -}} +{{- if .Values.security.enabled }} + {{- "https" -}} {{- else -}} + {{- "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure security values +*/}} +{{- define "elasticsearch.configure.security" -}} +- name: ELASTICSEARCH_ENABLE_SECURITY + value: "true" +- name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.secretName" . }} + key: elasticsearch-password +- name: ELASTICSEARCH_ENABLE_FIPS_MODE + value: {{ .Values.security.fipsMode | quote }} +- name: ELASTICSEARCH_TLS_VERIFICATION_MODE + value: {{ .Values.security.tls.verificationMode | quote }} +- name: ELASTICSEARCH_ENABLE_REST_TLS + value: {{ ternary "true" "false" .Values.security.tls.restEncryption | quote }} +{{- if or (include "elasticsearch.createTlsSecret" .) .Values.security.tls.usePemCerts }} +- name: ELASTICSEARCH_TLS_USE_PEM + value: "true" +{{- else }} +- name: ELASTICSEARCH_KEYSTORE_LOCATION + value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.keystoreFilename }}" +- name: ELASTICSEARCH_TRUSTSTORE_LOCATION + value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.truststoreFilename }}" +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.keystorePassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . }} + key: keystore-password +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.truststorePassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . }} + key: truststore-password +{{- end }} +{{- if and .Values.security.tls.usePemCerts (or .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_KEY_PASSWORD + value: {{ .Values.security.tls.keyPassword | quote }} +{{- end }} +{{- end -}} + +{{/* +Returns true if at least 1 existing secret was provided +*/}} +{{- define "elasticsearch.security.tlsSecretsProvided" -}} +{{- $masterSecret :=.Values.security.tls.master.existingSecret -}} +{{- $dataSecret :=.Values.security.tls.data.existingSecret -}} +{{- $coordSecret :=.Values.security.tls.coordinating.existingSecret -}} +{{- $ingestSecret :=.Values.security.tls.ingest.existingSecret -}} +{{- $ingestEnabled := .Values.ingest.enabled -}} +{{- if or $masterSecret $dataSecret $coordSecret (and $ingestEnabled $ingestSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for master nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.master" -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.master.existingSecret) -}} +elasticsearch: security.tls.master.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch master nodes. + Provide the certificates using --set .Values.security.tls.master.existingSecret="my-secret".
+{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for data nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.data" -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.data.existingSecret) -}} +elasticsearch: security.tls.data.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch data nodes. + Provide the certificates using --set .Values.security.tls.data.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for coordinating-only nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.coordinating.existingSecret) -}} +elasticsearch: security.tls.coordinating.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch coordinating-only nodes. + Provide the certificates using --set .Values.security.tls.coordinating.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for ingest nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.ingest" -}} +{{- if and .Values.security.enabled .Values.ingest.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.ingest.existingSecret) -}} +elasticsearch: security.tls.ingest.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch ingest nodes. + Provide the certificates using --set .Values.security.tls.ingest.existingSecret="my-secret". +{{- end -}} {{- end -}} + +{{/* Validate values of Elasticsearch - TLS enabled but no certificates provided */}} +{{- define "elasticsearch.validateValues.security.tls" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.autoGenerated) (not (include "elasticsearch.security.tlsSecretsProvided" .)) -}} +elasticsearch: security.tls + In order to enable X-Pack Security, it is necessary to configure TLS. + Three different mechanisms can be used: + - Provide an existing secret containing the Keystore and Truststore for each role + - Provide an existing secret containing the PEM certificates for each role and enable `security.tls.usePemCerts=true` + - Enable using auto-generated certificates with `security.tls.autoGenerated=true` + Existing secrets containing either JKS/PKCS12 or PEM certificates can be provided using --set Values.security.tls.master.existingSecret=master-certs, + --set Values.security.tls.data.existingSecret=data-certs, --set Values.security.tls.coordinating.existingSecret=coordinating-certs, --set Values.security.tls.ingest.existingSecret=ingest-certs +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "elasticsearch.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.tls" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.master" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.data" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" .) 
-}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.ingest" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Sysctl set if less than +*/}} +{{- define "elasticsearch.sysctlIfLess" -}} +CURRENT=`sysctl -n {{ .key }}`; +DESIRED="{{ .value }}"; +if [ "$DESIRED" -gt "$CURRENT" ]; then + sysctl -w {{ .key }}={{ .value }}; +fi; {{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-curator.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-curator.yaml new file mode 100644 index 0000000..14ee395 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-curator.yaml @@ -0,0 +1,11 @@ +{{- if .Values.curator.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: curator +data: + action_file.yml: {{ required "A valid .Values.curator.configMaps.action_file_yml entry is required!" (toYaml .Values.curator.configMaps.action_file_yml | indent 2) }} + config.yml: {{ required "A valid .Values.curator.configMaps.config_yml entry is required!" (tpl (toYaml .Values.curator.configMaps.config_yml | indent 2) $) }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-es.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-es.yaml new file mode 100644 index 0000000..40efaa0 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-es.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.config .Values.extraConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +data: + {{- if .Values.config }} + elasticsearch.yml: |- + {{- toYaml .Values.config | nindent 4 }} + {{- end }} + {{- if .Values.extraConfig }} + my_elasticsearch.yml: |- + {{- toYaml .Values.extraConfig | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-initscripts.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-initscripts.yaml new file mode 100644 index 0000000..e764d3c --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap-initscripts.yaml @@ -0,0 +1,12 @@ +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.initScripts" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + component: master +data: +{{- with .Values.initScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{ end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap.yaml deleted file mode 100644 index 4274d8b..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/configmap.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.esConfig }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "elasticsearch.uname" .
}}-config - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}" - app: "{{ template "elasticsearch.uname" . }}" -data: -{{- range $path, $config := .Values.esConfig }} - {{ $path }}: | -{{ $config | indent 4 -}} -{{- end -}} -{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-hpa.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-hpa.yaml new file mode 100644 index 0000000..59e330c --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.coordinating.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.coordinating.fullname" . }} + minReplicas: {{ .Values.coordinating.autoscaling.minReplicas }} + maxReplicas: {{ .Values.coordinating.autoscaling.maxReplicas }} + metrics: + {{- if .Values.coordinating.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.coordinating.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-statefulset.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-statefulset.yaml new file mode 100644 index 0000000..9b145c7 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-statefulset.yaml @@ -0,0 +1,280 @@ +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only +spec: + updateStrategy: + type: {{ .Values.coordinating.updateStrategy.type }} + {{- if (eq "Recreate" .Values.coordinating.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: coordinating-only + podManagementPolicy: Parallel + {{- if not .Values.coordinating.autoscaling.enabled }} + replicas: {{ .Values.coordinating.replicas }} + {{- end }} + serviceName: {{ template "elasticsearch.coordinating.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: coordinating-only + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only + {{- if .Values.coordinating.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.coordinating.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.coordinating.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.coordinating.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.schedulerName }} + schedulerName: {{ .Values.coordinating.schedulerName }} + {{- end }} + {{- if .Values.coordinating.priorityClassName }} + priorityClassName: {{ .Values.coordinating.priorityClassName | quote }} + {{- end }} + {{- if .Values.coordinating.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAntiAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.coordinating.nodeAffinityPreset.type "key" .Values.coordinating.nodeAffinityPreset.key "values" .Values.coordinating.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.coordinating.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . }} + {{- if or .Values.coordinating.podSecurityContext.enabled .Values.coordinating.securityContext.enabled }} + securityContext: + {{- if .Values.coordinating.podSecurityContext.enabled }} + {{- omit .Values.coordinating.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.coordinating.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.coordinating.initContainers .Values.sysctlImage.enabled }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.coordinating.containerSecurityContext.enabled .Values.coordinating.securityContext.enabled }} + securityContext: + {{- if .Values.coordinating.containerSecurityContext.enabled }} + {{- omit .Values.coordinating.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- else }} + runAsUser: {{ .Values.coordinating.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . | quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.coordinating.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "coordinating" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: {{ include "elasticsearch.httpPortName" . }} + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.coordinating.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.coordinating.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.coordinating.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.coordinating.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.coordinating.startupProbe.successThreshold }} + failureThreshold: {{ .Values.coordinating.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.coordinating.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.coordinating.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.coordinating.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.coordinating.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.coordinating.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.coordinating.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.coordinating.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.coordinating.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.coordinating.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.coordinating.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.coordinating.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.coordinating.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.coordinating.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.resources }} + resources: {{- toYaml .Values.coordinating.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + - name: data + mountPath: /bitnami/elasticsearch/data + 
{{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.coordinating.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.coordinating.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + - name: "data" + emptyDir: {} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-svc.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-svc.yaml new file mode 100644 index 0000000..0acd7bd --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/coordinating-svc.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.coordinating.service.type | quote }} + {{- if and (eq .Values.coordinating.service.type "LoadBalancer") (not (empty .Values.coordinating.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.coordinating.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.coordinating.service.externalTrafficPolicy | quote }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ include "elasticsearch.httpPortName" . }} + port: {{ .Values.coordinating.service.port }} + targetPort: {{ include "elasticsearch.httpPortName" . }} + {{- if and (or (eq .Values.coordinating.service.type "NodePort") (eq .Values.coordinating.service.type "LoadBalancer")) (not (empty .Values.coordinating.service.nodePort)) }} + nodePort: {{ .Values.coordinating.service.nodePort }} + {{- else if eq .Values.coordinating.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-transport + port: 9300 + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: coordinating-only diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/cronjob.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/cronjob.yaml new file mode 100644 index 0000000..8e9c2de --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/cronjob.yaml @@ -0,0 +1,130 @@ +{{- if .Values.curator.enabled }} +apiVersion: {{ template "cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ template "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if .Values.curator.cronjob.annotations }} + annotations: {{- toYaml .Values.curator.cronjob.annotations | nindent 4 }} + {{- end }} +spec: + schedule: "{{ .Values.curator.cronjob.schedule }}" + {{- with .Values.curator.cronjob.concurrencyPolicy }} + concurrencyPolicy: {{ . }} + {{- end }} + {{- with .Values.curator.cronjob.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ . }} + {{- end }} + {{- with .Values.curator.cronjob.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ . }} + {{- end }} + jobTemplate: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if .Values.curator.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.curator.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 12 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if .Values.curator.podAnnotations }} + annotations: {{- toYaml .Values.curator.podAnnotations | nindent 12 }} + {{- end }} + spec: + volumes: + - name: config-volume + configMap: + name: {{ template "elasticsearch.curator.fullname" . }} + {{- if .Values.curator.extraVolumes }} + {{- toYaml .Values.curator.extraVolumes | nindent 12 }} + {{- end }} + restartPolicy: {{ .Values.curator.cronjob.jobRestartPolicy }} + {{- if .Values.curator.priorityClassName }} + priorityClassName: {{ .Values.curator.priorityClassName | quote }} + {{- end }} +{{- include "elasticsearch.imagePullSecrets" . | indent 10 }} + {{- $initContainers := coalesce .Values.curator.initContainers .Values.curator.extraInitContainers -}} + {{- if $initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" $initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.curator.schedulerName }} + schedulerName: {{ .Values.curator.schedulerName }} + {{- end }} + {{- if .Values.curator.rbac.enabled }} + serviceAccountName: {{ include "elasticsearch.curator.serviceAccountName" . 
}} + {{- end }} + {{- if .Values.curator.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.curator.affinity "context" $) | nindent 12 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.curator.podAffinityPreset "component" "curator" "context" $) | nindent 14 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.curator.podAntiAffinityPreset "component" "curator" "context" $) | nindent 14 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.curator.nodeAffinityPreset.type "key" .Values.curator.nodeAffinityPreset.key "values" .Values.curator.nodeAffinityPreset.values) | nindent 14 }} + {{- end }} + {{- if .Values.curator.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.curator.nodeSelector "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.curator.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.curator.tolerations "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.curator.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.curator.topologySpreadConstraints "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.curator.securityContext }} + securityContext: {{- toYaml .Values.curator.securityContext | nindent 12 }} + {{- end }} + containers: + - name: {{ template "elasticsearch.curator.fullname" . }} + image: {{ template "elasticsearch.curator.image" . }} + imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }} + volumeMounts: + - name: config-volume + mountPath: /etc/es-curator + {{- if .Values.curator.extraVolumeMounts }} + {{- toYaml .Values.curator.extraVolumeMounts | nindent 16 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 16 }} + {{- else if .Values.curator.command }} + command: {{ toYaml .Values.curator.command | nindent 16 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 16 }} + {{- else if .Values.curator.dryrun }} + args: [ "--dry-run", "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + {{- else }} + args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + {{- end }} + env: + {{- if .Values.curator.env }} + {{- range $key,$value := .Values.curator.env }} + - name: {{ $key | upper | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.curator.envFromSecrets }} + {{- range $key,$value := .Values.curator.envFromSecrets }} + - name: {{ $key | upper | quote }} + valueFrom: + secretKeyRef: + name: {{ $value.from.secret | quote }} + key: {{ $value.from.key | quote }} + {{- end }} + {{- end }} + {{- if .Values.curator.resources }} + resources: {{- toYaml .Values.curator.resources | nindent 16 }} + {{- end }} + {{- if .Values.curator.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.curator.sidecars "context" $) | nindent 12 }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/data-hpa.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/data-hpa.yaml new file mode 100644 index 0000000..67bc687 --- /dev/null +++ 
b/deployment/deployment/middleware_deployment/elasticsearch/templates/data-hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.data.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.data.fullname" . }} + minReplicas: {{ .Values.data.autoscaling.minReplicas }} + maxReplicas: {{ .Values.data.autoscaling.maxReplicas }} + metrics: + {{- if .Values.data.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.data.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.data.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.data.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/data-statefulset.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/data-statefulset.yaml new file mode 100644 index 0000000..f498597 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/data-statefulset.yaml @@ -0,0 +1,326 @@ +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data +spec: + updateStrategy: + type: {{ .Values.data.updateStrategy.type }} + {{- if (eq "OnDelete" .Values.data.updateStrategy.type) }} + rollingUpdate: null + {{- else if .Values.data.updateStrategy.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.data.updateStrategy.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: data + podManagementPolicy: Parallel + {{- if not .Values.data.autoscaling.enabled }} + replicas: {{ .Values.data.replicas }} + {{- end }} + serviceName: {{ template "elasticsearch.data.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: data + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data + {{- if .Values.data.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.data.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.data.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.data.priorityClassName }} + priorityClassName: {{ .Values.data.priorityClassName | quote }} + {{- end }} + {{- if .Values.data.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.data.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAffinityPreset "component" "data" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAntiAffinityPreset "component" "data" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.data.nodeAffinityPreset.type "key" .Values.data.nodeAffinityPreset.key "values" .Values.data.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.data.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.data.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.schedulerName }} + schedulerName: {{ .Values.data.schedulerName }} + {{- end }} + {{- if .Values.data.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.data.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . }} + {{- if or .Values.data.podSecurityContext.enabled .Values.data.securityContext.enabled }} + securityContext: + {{- if .Values.data.podSecurityContext.enabled }} + {{- omit .Values.data.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.data.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.data.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }} + - name: volume-permissions + image: {{ include "elasticsearch.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.data.securityContext.runAsUser }}:{{ .Values.data.securityContext.fsGroup }} //bitnami/elasticsearch/data + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: "/bitnami/elasticsearch/data" + {{- end }} + {{- if .Values.data.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.data.containerSecurityContext.enabled .Values.data.securityContext.enabled }} + securityContext: + {{- if .Values.data.containerSecurityContext.enabled }} + {{- omit .Values.data.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- else }} + runAsUser: {{ .Values.data.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . | quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }} + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.data.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "data" + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.data.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: {{ include "elasticsearch.httpPortName" . 
}} + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.data.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.data.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.data.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.data.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.data.startupProbe.successThreshold }} + failureThreshold: {{ .Values.data.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.data.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.data.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.data.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.data.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.data.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.data.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.data.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.data.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.data.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.data.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.data.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.data.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.data.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.data.resources }} + resources: {{- toYaml .Values.data.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: "config" + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + - name: "data" + mountPath: "/bitnami/elasticsearch/data" + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.data.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - 
name: "config" + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.data.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} +{{- if not .Values.data.persistence.enabled }} + - name: "data" + emptyDir: {} +{{- else if .Values.data.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.data.persistence.existingClaim }} +{{- else }} + volumeClaimTemplates: + - metadata: + name: "data" + {{- if .Values.data.persistence.annotations }} + annotations: {{- toYaml .Values.data.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: {{- toYaml .Values.data.persistence.accessModes | nindent 10 }} + {{ $storage := dict "global" .Values.global "local" .Values.data }} + {{ include "elasticsearch.storageClass" $storage }} + resources: + requests: + storage: {{ .Values.data.persistence.size | quote }} + {{- if .Values.data.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.data.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.data.persistence.existingVolume }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/data-svc.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/data-svc.yaml new file mode 100644 index 0000000..76039bd --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/data-svc.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.data.service.annotations "context" $) | nindent 4 }} +spec: + type: ClusterIP + publishNotReadyAddresses: true + ports: + - name: {{ include "elasticsearch.httpPortName" . }} + port: 9200 + targetPort: {{ include "elasticsearch.httpPortName" . }} + - name: tcp-transport + port: 9300 + targetPort: transport + nodePort: null + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: data diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/extra-list.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/hooks/job.install.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/hooks/job.install.yaml new file mode 100644 index 0000000..e655537 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/hooks/job.install.yaml @@ -0,0 +1,73 @@ +{{- if .Values.curator.enabled }} +{{- range $kind, $enabled := .Values.curator.hooks }} +{{- if $enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "elasticsearch.curator.fullname" . }}-curator-on-{{ $kind }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + annotations: + "helm.sh/hook": post-{{ $kind }} + "helm.sh/hook-weight": "1" + {{- if $.Values.cronjob.annotations }} + {{- toYaml $.Values.cronjob.annotations | nindent 4 }} + {{- end }} +spec: + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 10 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if $.Values.podAnnotations }} + annotations: {{- toYaml $.Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + restartPolicy: Never + {{- if $.Values.curator.priorityClassName }} + priorityClassName: {{ $.Values.curator.priorityClassName | quote }} + {{- end }} + {{- if .Values.curator.schedulerName }} + schedulerName: {{ .Values.curator.schedulerName }} + {{- end }} + {{- if $.Values.curator.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.curator.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.curator.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.tolerations "context" $) | nindent 8 }} + {{- end }} + containers: + - name: {{ template "elasticsearch.curator.fullname" . }} + image: {{ template "elasticsearch.curator.image" . }} + imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: [ "curator" ] + args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + {{- end }} + resources: {{- toYaml $.Values.curator.resources | nindent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/es-curator + {{- if $.Values.curator.extraVolumeMounts }} + {{- toYaml $.Values.curator.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ template "elasticsearch.curator.fullname" . 
}} + {{- if $.Values.curator.extraVolumes }} + {{- toYaml $.Values.curator.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-statefulset.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-statefulset.yaml new file mode 100644 index 0000000..e467f40 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-statefulset.yaml @@ -0,0 +1,280 @@ +{{- if .Values.ingest.enabled }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest +spec: + updateStrategy: + type: {{ .Values.ingest.updateStrategy.type }} + {{- if (eq "OnDelete" .Values.ingest.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: ingest + podManagementPolicy: Parallel + replicas: {{ .Values.ingest.replicas }} + serviceName: {{ template "elasticsearch.ingest.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: ingest + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest + {{- if .Values.ingest.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.ingest.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.ingest.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.ingest.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.schedulerName }} + schedulerName: {{ .Values.ingest.schedulerName }} + {{- end }} + {{- if .Values.ingest.priorityClassName }} + priorityClassName: {{ .Values.ingest.priorityClassName | quote }} + {{- end }} + {{- if .Values.ingest.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAffinityPreset "component" "ingest" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAntiAffinityPreset "component" "ingest" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingest.nodeAffinityPreset.type "key" .Values.ingest.nodeAffinityPreset.key "values" .Values.ingest.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.ingest.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.ingest.serviceAccountName" . }} + {{- if or .Values.ingest.podSecurityContext.enabled .Values.ingest.securityContext.enabled }} + securityContext: + {{- if .Values.ingest.podSecurityContext.enabled }} + {{- omit .Values.ingest.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.ingest.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.ingest.initContainers .Values.sysctlImage.enabled }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.ingest.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.ingest.containerSecurityContext.enabled .Values.ingest.securityContext.enabled }} + securityContext: + {{- if .Values.ingest.containerSecurityContext.enabled }} + {{- omit .Values.ingest.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- else }} + runAsUser: {{ .Values.ingest.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . | quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.ingest.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.ingest.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "ingest" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: {{ include "elasticsearch.httpPortName" . 
}} + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.ingest.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.ingest.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.ingest.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.ingest.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.ingest.startupProbe.successThreshold }} + failureThreshold: {{ .Values.ingest.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.ingest.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.ingest.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.ingest.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.ingest.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.ingest.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.ingest.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.ingest.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.ingest.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.ingest.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.ingest.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.ingest.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.ingest.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.ingest.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.ingest.resources }} + resources: {{- toYaml .Values.ingest.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.ingest.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + 
{{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.ingest.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + - name: "data" + emptyDir: {} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-svc.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-svc.yaml new file mode 100644 index 0000000..5527dcb --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/ingest-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.ingest.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.ingest.service.type | quote }} + {{- if and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ include "elasticsearch.httpPortName" . }} + port: 9200 + targetPort: {{ include "elasticsearch.httpPortName" . }} + - name: tcp-transport + port: {{ .Values.ingest.service.port }} + targetPort: transport + {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePort)) }} + nodePort: {{ .Values.ingest.service.nodePort }} + {{- else if eq .Values.ingest.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/ingress.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/ingress.yaml index 7be364e..16c8117 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/ingress.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/ingress.yaml @@ -1,54 +1,55 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "elasticsearch.uname" . -}} -{{- $httpPort := .Values.httpPort -}} -{{- $ingressPath := .Values.ingress.path -}} -apiVersion: networking.k8s.io/v1 +{{- if .Values.master.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} kind: Ingress metadata: - name: {{ $fullName }} - labels: - app: {{ .Chart.Name }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- with .Values.ingress.annotations }} + name: {{ include "common.names.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.master.ingress.annotations .Values.commonAnnotations }} annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- if .ingressPath }} - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . }} - {{- end }} - secretName: {{ .secretName }} + {{- if .Values.master.ingress.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.ingress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} - {{- else }} -{{ toYaml .Values.ingress.tls | indent 4 }} {{- end }} -{{- end }} +spec: rules: - {{- range .Values.ingress.hosts }} - {{- if $ingressPath }} - - host: {{ . }} - http: + {{- if .Values.master.ingress.hostname }} + - http: paths: - - path: {{ $ingressPath }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $httpPort }} - {{- else }} - - host: {{ .host }} + - path: {{ .Values.master.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.master.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "elasticsearch.master.fullname" .) "servicePort" (include "elasticsearch.httpPortName" .) "context" $) | nindent 14 }} + {{- if ne .Values.master.ingress.hostname "*" }} + host: {{ .Values.master.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.master.ingress.extraHosts }} + - host: {{ .name }} http: paths: - {{- range .paths }} - - path: {{ .path }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ .servicePort | default $httpPort }} - {{- end }} + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "elasticsearch.master.fullname" $) "servicePort" (include "elasticsearch.httpPortName" $) "context" $) | nindent 14 }} + {{- end }} + {{- if or .Values.master.ingress.tls .Values.master.ingress.extraTls }} + tls: + {{- if .Values.master.ingress.tls }} + - hosts: + - {{ .Values.master.ingress.hostname }} + secretName: {{ printf "%s-tls" .Values.master.ingress.hostname }} + {{- end }} + {{- if .Values.master.ingress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.ingress.extraTls "context" $ ) | nindent 4 }} {{- end }} {{- end }} {{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/master-hpa.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/master-hpa.yaml new file mode 100644 index 0000000..d542337 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/master-hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.master.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.master.fullname" . }} + minReplicas: {{ .Values.master.autoscaling.minReplicas }} + maxReplicas: {{ .Values.master.autoscaling.maxReplicas }} + metrics: + {{- if .Values.master.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.master.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.master.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.master.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/master-statefulset.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/master-statefulset.yaml new file mode 100644 index 0000000..7288568 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/master-statefulset.yaml @@ -0,0 +1,329 @@ +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master +spec: + updateStrategy: + type: {{ .Values.master.updateStrategy.type }} + {{- if (eq "OnDelete" .Values.master.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: master + podManagementPolicy: Parallel + {{- if not .Values.master.autoscaling.enabled }} + replicas: {{ .Values.master.replicas }} + {{- end }} + serviceName: {{ template "elasticsearch.master.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: master + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master + {{- if .Values.master.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.master.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }} + {{- if or .Values.master.podSecurityContext.enabled .Values.master.securityContext.enabled }} + securityContext: + {{- if .Values.master.podSecurityContext.enabled }} + {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.master.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.master.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} + - name: volume-permissions + image: {{ include "elasticsearch.volumePermissions.image" . 
}}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+            - -ec
+            - |
+              chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} /bitnami/elasticsearch/data
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data"
+        {{- end }}
+        {{- if .Values.master.initContainers }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }}
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if or .Values.master.containerSecurityContext.enabled .Values.master.securityContext.enabled }}
+          securityContext:
+            {{- if .Values.master.containerSecurityContext.enabled }}
+            {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+            {{- else }}
+            runAsUser: {{ .Values.master.securityContext.runAsUser }}
+            {{- end }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ include "elasticsearch.hosts" . | quote }}
+            - name: ELASTICSEARCH_TOTAL_NODES
+              value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }}
+            - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
+              {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
+              {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }}
+              value: {{ range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
+            - name: ELASTICSEARCH_MINIMUM_MASTER_NODES
+              value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }}
+            - name: ELASTICSEARCH_ADVERTISED_HOSTNAME
+              value: "$(MY_POD_NAME).{{ include "elasticsearch.master.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            {{- if .Values.snapshotRepoPath }}
+            - name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH
+              value: {{ .Values.snapshotRepoPath | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.master.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "master"
+            {{- if .Values.security.enabled }}
+            {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: {{ include "elasticsearch.httpPortName" . }} + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.master.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.startupProbe.successThreshold }} + failureThreshold: {{ .Values.master.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + 
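+          ## Illustrative only: entries from .Values.extraVolumeMounts are rendered
+          ## verbatim below; a hypothetical example (mirroring the one in values.yaml):
+          ##   - name: es-certs
+          ##     mountPath: /certs
+          ##     readOnly: true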
{{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.master.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} +{{- if not .Values.master.persistence.enabled }} + - name: "data" + emptyDir: {} +{{- else if .Values.master.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.master.persistence.existingClaim }} +{{- else }} + volumeClaimTemplates: + - metadata: + name: "data" + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: {{- toYaml .Values.master.persistence.accessModes | nindent 10 }} + {{ $storage := dict "global" .Values.global "local" .Values.master }} + {{ include "elasticsearch.storageClass" $storage }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.master.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.master.persistence.existingVolume }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/master-svc.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/master-svc.yaml new file mode 100644 index 0000000..e79e3c6 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/master-svc.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.master.service.type | quote }} + {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ include "elasticsearch.httpPortName" . }} + port: 9200 + targetPort: {{ include "elasticsearch.httpPortName" . }} + - name: tcp-transport + port: {{ .Values.master.service.port }} + targetPort: transport + {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) (not (empty .Values.master.service.nodePort)) }} + nodePort: {{ .Values.master.service.nodePort }} + {{- else if eq .Values.master.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-deploy.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-deploy.yaml new file mode 100644 index 0000000..f30e6b4 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-deploy.yaml @@ -0,0 +1,121 @@ +{{- if .Values.metrics.enabled }} +apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: metrics +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + replicas: 1 + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: metrics + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: metrics + {{- if .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- with .Values.metrics.podAnnotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.schedulerName }} + schedulerName: {{ .Values.metrics.schedulerName }} + {{- end }} + containers: + - name: metrics + image: {{ include "elasticsearch.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- $protocol := (ternary "https" "http" (and .Values.security.enabled .Values.security.tls.restEncryption)) }} + {{- if gt (int .Values.coordinating.replicas) 0 }} + # Prefer coordinating only nodes to do the initial metrics query + - --es.uri={{$protocol}}://{{ template "elasticsearch.coordinating.fullname" . }}:{{ .Values.coordinating.service.port }} + {{- else }} + # Using master nodes as there are no coordinating only nodes + - --es.uri={{$protocol}}://{{ include "elasticsearch.master.fullname" . }}:9200 + {{- end }} + - --es.all + {{- if .Values.security.tls.restEncryption }} + - --es.ssl-skip-verify + {{- end }} + {{- if .Values.metrics.extraArgs }} + {{- toYaml .Values.metrics.extraArgs | nindent 12 }} + {{- end }} + {{- end }} + env: + {{- if .Values.security.enabled }} + - name: ES_USERNAME + value: "elastic" + - name: ES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.secretName" . }} + key: elasticsearch-password + {{- end }} + ports: + - name: metrics + containerPort: 9114 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- else if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- else if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.metrics.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.nodeAffinityPreset.type "key" 
.Values.metrics.nodeAffinityPreset.key "values" .Values.metrics.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-svc.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-svc.yaml new file mode 100644 index 0000000..6e49a71 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/metrics-svc.yaml @@ -0,0 +1,17 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: http-metrics + port: 9114 + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/networkpolicy.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/networkpolicy.yaml deleted file mode 100644 index 62bb1bd..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/networkpolicy.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }} -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: {{ template "elasticsearch.uname" . }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}" - app: "{{ template "elasticsearch.uname" . }}" -spec: - podSelector: - matchLabels: - app: "{{ template "elasticsearch.uname" . }}" - ingress: # Allow inbound connections - -{{- if .Values.networkPolicy.http.enabled }} - # For HTTP access - - ports: - - port: {{ .Values.httpPort }} - from: - # From authorized Pods (having the correct label) - - podSelector: - matchLabels: - {{ template "elasticsearch.uname" . }}-http-client: "true" -{{- with .Values.networkPolicy.http.explicitNamespacesSelector }} - # From authorized namespaces - namespaceSelector: -{{ toYaml . | indent 12 }} -{{- end }} -{{- with .Values.networkPolicy.http.additionalRules }} - # Or from custom additional rules -{{ toYaml . | indent 8 }} -{{- end }} -{{- end }} - -{{- if .Values.networkPolicy.transport.enabled }} - # For transport access - - ports: - - port: {{ .Values.transportPort }} - from: - # From authorized Pods (having the correct label) - - podSelector: - matchLabels: - {{ template "elasticsearch.uname" . }}-transport-client: "true" -{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }} - # From authorized namespaces - namespaceSelector: -{{ toYaml . 
| indent 12 }} -{{- end }} -{{- with .Values.networkPolicy.transport.additionalRules }} - # Or from custom additional rules -{{ toYaml . | indent 8 }} -{{- end }} - # Or from other ElasticSearch Pods - - podSelector: - matchLabels: - app: "{{ template "elasticsearch.uname" . }}" -{{- end }} - -{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/poddisruptionbudget.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/poddisruptionbudget.yaml deleted file mode 100644 index df6c74e..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -{{- if .Values.maxUnavailable }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: "{{ template "elasticsearch.uname" . }}-pdb" -spec: - maxUnavailable: {{ .Values.maxUnavailable }} - selector: - matchLabels: - app: "{{ template "elasticsearch.uname" . }}" -{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/podsecuritypolicy.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/podsecuritypolicy.yaml index d8b3545..2d0a3ff 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/podsecuritypolicy.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/podsecuritypolicy.yaml @@ -1,14 +1,34 @@ -{{- if .Values.podSecurityPolicy.create -}} -{{- $fullName := include "elasticsearch.uname" . -}} +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.curator.enabled .Values.curator.psp.create }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: - name: {{ default $fullName .Values.podSecurityPolicy.name | quote }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - app: {{ $fullName | quote }} + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator spec: -{{ toYaml .Values.podSecurityPolicy.spec | indent 2 }} -{{- end -}} + privileged: true + #requiredDropCapabilities: + volumes: + - 'configMap' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/role.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/role.yaml index d3a7ee3..9f34b99 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/role.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/role.yaml @@ -1,25 +1,21 @@ -{{- if .Values.rbac.create -}} -{{- $fullName := include "elasticsearch.uname" . -}} -apiVersion: rbac.authorization.k8s.io/v1 +{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }} kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . 
}} metadata: - name: {{ $fullName | quote }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - app: {{ $fullName | quote }} + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator + component: elasticsearch-curator-configmap rules: - - apiGroups: - - extensions - resources: - - podsecuritypolicies + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["update", "patch"] + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.curator.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] resourceNames: - {{- if eq .Values.podSecurityPolicy.name "" }} - - {{ $fullName | quote }} - {{- else }} - - {{ .Values.podSecurityPolicy.name | quote }} - {{- end }} - verbs: - - use -{{- end -}} + - {{ include "elasticsearch.curator.fullname" . }} + {{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/rolebinding.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/rolebinding.yaml index 7a529d9..658e028 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/rolebinding.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/rolebinding.yaml @@ -1,24 +1,17 @@ -{{- if .Values.rbac.create -}} -{{- $fullName := include "elasticsearch.uname" . -}} -apiVersion: rbac.authorization.k8s.io/v1 +{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }} kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} metadata: - name: {{ $fullName | quote }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - app: {{ $fullName | quote }} -subjects: - - kind: ServiceAccount - {{- if eq .Values.rbac.serviceAccountName "" }} - name: {{ $fullName | quote }} - {{- else }} - name: {{ .Values.rbac.serviceAccountName | quote }} - {{- end }} - namespace: {{ .Release.Namespace | quote }} + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator + component: elasticsearch-curator-configmap roleRef: kind: Role - name: {{ $fullName | quote }} + name: {{ template "elasticsearch.curator.fullname" . }} apiGroup: rbac.authorization.k8s.io -{{- end -}} +subjects: + - kind: ServiceAccount + name: {{ include "elasticsearch.curator.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/secrets.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/secrets.yaml new file mode 100644 index 0000000..140e3e8 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/secrets.yaml @@ -0,0 +1,54 @@ +{{- if (include "elasticsearch.createSecret" . ) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{- if .Values.security.elasticPassword }}
+  elasticsearch-password: {{ default "" .Values.security.elasticPassword | b64enc | quote }}
+  {{- else }}
+  elasticsearch-password: {{ randAlphaNum 14 | b64enc | quote }}
+  {{- end }}
+  {{- if .Values.security.tls.keystorePassword }}
+  keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }}
+  {{- end }}
+  {{- if .Values.security.tls.truststorePassword }}
+  truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }}
+  {{- end }}
+  {{- if .Values.security.tls.keyPassword }}
+  key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }}
+  {{- end }}
+---
+{{- end }}
+{{- if (include "elasticsearch.createTlsPasswordsSecret" . ) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-tls-pass
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{- if .Values.security.tls.keystorePassword }}
+  keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }}
+  {{- end }}
+  {{- if .Values.security.tls.truststorePassword }}
+  truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }}
+  {{- end }}
+  {{- if .Values.security.tls.keyPassword }}
+  key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }}
+  {{- end }}
+{{- end }}
diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/service.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/service.yaml
deleted file mode 100644
index 1da6951..0000000
--- a/deployment/deployment/middleware_deployment/elasticsearch/templates/service.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-{{- if .Values.service.enabled -}}
-kind: Service
-apiVersion: v1
-metadata:
-{{- if eq .Values.nodeGroup "master" }}
-  name: {{ template "elasticsearch.masterService" . }}
-{{- else }}
-  name: {{ template "elasticsearch.uname" . }}
-{{- end }}
-  labels:
-    heritage: {{ .Release.Service | quote }}
-    release: {{ .Release.Name | quote }}
-    chart: "{{ .Chart.Name }}"
-    app: "{{ template "elasticsearch.uname" . }}"
-{{- if .Values.service.labels }}
-{{ toYaml .Values.service.labels | indent 4}}
-{{- end }}
-  annotations:
-{{ toYaml .Values.service.annotations | indent 4 }}
-spec:
-  type: {{ .Values.service.type }}
-  selector:
-    release: {{ .Release.Name | quote }}
-    chart: "{{ .Chart.Name }}"
-    app: "{{ template "elasticsearch.uname" . 
}}" - ports: - - name: {{ .Values.service.httpPortName | default "http" }} - protocol: TCP - port: {{ .Values.httpPort }} -{{- if .Values.service.nodePort }} - nodePort: {{ .Values.service.nodePort }} -{{- end }} - - name: {{ .Values.service.transportPortName | default "transport" }} - protocol: TCP - port: {{ .Values.transportPort }} -{{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} -{{- end }} -{{- with .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: -{{ toYaml . | indent 4 }} -{{- end }} -{{- if .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} -{{- end }} -{{- end }} ---- -kind: Service -apiVersion: v1 -metadata: -{{- if eq .Values.nodeGroup "master" }} - name: {{ template "elasticsearch.masterService" . }}-headless -{{- else }} - name: {{ template "elasticsearch.uname" . }}-headless -{{- end }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}" - app: "{{ template "elasticsearch.uname" . }}" -{{- if .Values.service.labelsHeadless }} -{{ toYaml .Values.service.labelsHeadless | indent 4 }} -{{- end }} - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -spec: - clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve - # Create endpoints also if the related pod isn't ready - publishNotReadyAddresses: true - selector: - app: "{{ template "elasticsearch.uname" . }}" - ports: - - name: {{ .Values.service.httpPortName | default "http" }} - port: {{ .Values.httpPort }} - - name: {{ .Values.service.transportPortName | default "transport" }} - port: {{ .Values.transportPort }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/serviceaccount.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/serviceaccount.yaml index 801d1cf..60da547 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/serviceaccount.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/serviceaccount.yaml @@ -1,20 +1,44 @@ -{{- if .Values.rbac.create -}} -{{- $fullName := include "elasticsearch.uname" . -}} +{{- if and .Values.curator.enabled .Values.curator.serviceAccount.create .Values.curator.rbac.enabled }} apiVersion: v1 kind: ServiceAccount metadata: - {{- if eq .Values.rbac.serviceAccountName "" }} - name: {{ $fullName | quote }} - {{- else }} - name: {{ .Values.rbac.serviceAccountName | quote }} - {{- end }} - annotations: - {{- with .Values.rbac.serviceAccountAnnotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - app: {{ $fullName | quote }} -{{- end -}} + name: {{ include "elasticsearch.curator.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator +{{- end }} +{{- if .Values.data.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.data.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: data +{{- end }} +{{- if .Values.master.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.master.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + role: master +{{- end }} +{{- if .Values.coordinating.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.coordinating.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: coordinating-only +{{- end }} +{{- if and .Values.ingest.enabled .Values.ingest.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.ingest.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: ingest +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/servicemonitor.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/servicemonitor.yaml new file mode 100644 index 0000000..aad59f9 --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/servicemonitor.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.labels "context" $) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/statefulset.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/statefulset.yaml deleted file mode 100644 index e3a34c5..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/statefulset.yaml +++ /dev/null @@ -1,378 +0,0 @@ ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "elasticsearch.uname" . }} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}" - app: "{{ template "elasticsearch.uname" . }}" - {{- range $key, $value := .Values.labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - annotations: - esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}" -spec: - serviceName: {{ template "elasticsearch.uname" . }}-headless - selector: - matchLabels: - app: "{{ template "elasticsearch.uname" . }}" - replicas: {{ .Values.replicas }} - podManagementPolicy: {{ .Values.podManagementPolicy }} - updateStrategy: - type: {{ .Values.updateStrategy }} - {{- if .Values.persistence.enabled }} - volumeClaimTemplates: - - metadata: - name: {{ template "elasticsearch.uname" . 
}} - {{- if .Values.persistence.labels.enabled }} - labels: - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}" - app: "{{ template "elasticsearch.uname" . }}" - {{- range $key, $value := .Values.labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - {{- with .Values.persistence.annotations }} - annotations: -{{ toYaml . | indent 8 }} - {{- end }} - spec: -{{ toYaml .Values.volumeClaimTemplate | indent 6 }} - {{- end }} - template: - metadata: - name: "{{ template "elasticsearch.uname" . }}" - labels: - release: {{ .Release.Name | quote }} - chart: "{{ .Chart.Name }}" - app: "{{ template "elasticsearch.uname" . }}" - {{- range $key, $value := .Values.labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - annotations: - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{/* This forces a restart if the configmap has changed */}} - {{- if .Values.esConfig }} - configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }} - {{- end }} - spec: - {{- if .Values.schedulerName }} - schedulerName: "{{ .Values.schedulerName }}" - {{- end }} - securityContext: -{{ toYaml .Values.podSecurityContext | indent 8 }} - {{- if .Values.fsGroup }} - fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup - {{- end }} - {{- if .Values.rbac.create }} - serviceAccountName: "{{ template "elasticsearch.uname" . }}" - {{- else if not (eq .Values.rbac.serviceAccountName "") }} - serviceAccountName: {{ .Values.rbac.serviceAccountName | quote }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 6 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} - {{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName }} - {{- end }} - affinity: - {{- end }} - {{- if eq .Values.antiAffinity "hard" }} - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - "{{ template "elasticsearch.uname" .}}" - topologyKey: {{ .Values.antiAffinityTopologyKey }} - {{- else if eq .Values.antiAffinity "soft" }} - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: {{ .Values.antiAffinityTopologyKey }} - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - "{{ template "elasticsearch.uname" . }}" - {{- end }} - {{- with .Values.nodeAffinity }} - nodeAffinity: -{{ toYaml . | indent 10 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} - volumes: - {{- range .Values.secretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- if .defaultMode }} - defaultMode: {{ .defaultMode }} - {{- end }} - {{- end }} - {{- if .Values.esConfig }} - - name: esconfig - configMap: - name: {{ template "elasticsearch.uname" . }}-config - {{- end }} -{{- if .Values.keystore }} - - name: keystore - emptyDir: {} - {{- range .Values.keystore }} - - name: keystore-{{ .secretName }} - secret: {{ toYaml . | nindent 12 }} - {{- end }} -{{ end }} - {{- if .Values.extraVolumes }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. 
- {{- if eq "string" (printf "%T" .Values.extraVolumes) }} -{{ tpl .Values.extraVolumes . | indent 8 }} - {{- else }} -{{ toYaml .Values.extraVolumes | indent 8 }} - {{- end }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - enableServiceLinks: {{ .Values.enableServiceLinks }} - {{- if .Values.hostAliases }} - hostAliases: {{ toYaml .Values.hostAliases | nindent 8 }} - {{- end }} - {{- if or (.Values.extraInitContainers) (.Values.sysctlInitContainer.enabled) (.Values.keystore) }} - initContainers: - {{- if .Values.sysctlInitContainer.enabled }} - - name: configure-sysctl - securityContext: - runAsUser: 0 - privileged: true - image: "{{ .Values.image }}:{{ .Values.imageTag }}" - imagePullPolicy: "{{ .Values.imagePullPolicy }}" - command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] - resources: -{{ toYaml .Values.initResources | indent 10 }} - {{- end }} -{{ if .Values.keystore }} - - name: keystore - image: "{{ .Values.image }}:{{ .Values.imageTag }}" - imagePullPolicy: "{{ .Values.imagePullPolicy }}" - command: - - sh - - -c - - | - #!/usr/bin/env bash - set -euo pipefail - - elasticsearch-keystore create - - for i in /tmp/keystoreSecrets/*/*; do - key=$(basename $i) - echo "Adding file $i to keystore key $key" - elasticsearch-keystore add-file "$key" "$i" - done - - # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup - if [ ! -z ${ELASTIC_PASSWORD+x} ]; then - echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' - echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password - fi - - cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ - env: {{ toYaml .Values.extraEnvs | nindent 10 }} - envFrom: {{ toYaml .Values.envFrom | nindent 10 }} - resources: {{ toYaml .Values.initResources | nindent 10 }} - volumeMounts: - - name: keystore - mountPath: /tmp/keystore - {{- range .Values.keystore }} - - name: keystore-{{ .secretName }} - mountPath: /tmp/keystoreSecrets/{{ .secretName }} - {{- end }} -{{ end }} - {{- if .Values.extraInitContainers }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraInitContainers) }} -{{ tpl .Values.extraInitContainers . | indent 6 }} - {{- else }} -{{ toYaml .Values.extraInitContainers | indent 6 }} - {{- end }} - {{- end }} - {{- end }} - containers: - - name: "{{ template "elasticsearch.name" . 
}}" - securityContext: -{{ toYaml .Values.securityContext | indent 10 }} - image: "{{ .Values.image }}:{{ .Values.imageTag }}" - imagePullPolicy: "{{ .Values.imagePullPolicy }}" - readinessProbe: - exec: - command: - - sh - - -c - - | - #!/usr/bin/env bash -e - # If the node is starting up wait for the cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" ) - # Once it has started only check that the node itself is responding - START_FILE=/tmp/.es_start_file - - # Disable nss cache to avoid filling dentry cache when calling curl - # This is required with Elasticsearch Docker using nss < 3.52 - export NSS_SDB_USE_CACHE=no - - http () { - local path="${1}" - local args="${2}" - set -- -XGET -s - - if [ "$args" != "" ]; then - set -- "$@" $args - fi - - if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then - set -- "$@" -u "${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" - fi - - curl --output /dev/null -k "$@" "{{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}" - } - - if [ -f "${START_FILE}" ]; then - echo 'Elasticsearch is already running, lets check the node is healthy' - HTTP_CODE=$(http "/" "-w %{http_code}") - RC=$? - if [[ ${RC} -ne 0 ]]; then - echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}" - exit ${RC} - fi - # ready if HTTP code 200, 503 is tolerable if ES version is 6.x - if [[ ${HTTP_CODE} == "200" ]]; then - exit 0 - elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then - exit 0 - else - echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}" - exit 1 - fi - - else - echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' - if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" "--fail" ; then - touch ${START_FILE} - exit 0 - else - echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' - exit 1 - fi - fi -{{ toYaml .Values.readinessProbe | indent 10 }} - ports: - - name: http - containerPort: {{ .Values.httpPort }} - - name: transport - containerPort: {{ .Values.transportPort }} - resources: -{{ toYaml .Values.resources | indent 10 }} - env: - - name: node.name - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- if eq .Values.roles.master "true" }} - {{- if ge (int (include "elasticsearch.esMajorVersion" .)) 7 }} - - name: cluster.initial_master_nodes - value: "{{ template "elasticsearch.endpoints" . }}" - {{- else }} - - name: discovery.zen.minimum_master_nodes - value: "{{ .Values.minimumMasterNodes }}" - {{- end }} - {{- end }} - {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }} - - name: discovery.zen.ping.unicast.hosts - value: "{{ template "elasticsearch.masterService" . }}-headless" - {{- else }} - - name: discovery.seed_hosts - value: "{{ template "elasticsearch.masterService" . 
}}-headless" - {{- end }} - - name: cluster.name - value: "{{ .Values.clusterName }}" - - name: network.host - value: "{{ .Values.networkHost }}" - {{- if .Values.esJavaOpts }} - - name: ES_JAVA_OPTS - value: "{{ .Values.esJavaOpts }}" - {{- end }} - {{- range $role, $enabled := .Values.roles }} - - name: node.{{ $role }} - value: "{{ $enabled }}" - {{- end }} -{{- if .Values.extraEnvs }} -{{ toYaml .Values.extraEnvs | indent 10 }} -{{- end }} -{{- if .Values.envFrom }} - envFrom: -{{ toYaml .Values.envFrom | indent 10 }} -{{- end }} - volumeMounts: - {{- if .Values.persistence.enabled }} - - name: "{{ template "elasticsearch.uname" . }}" - mountPath: /usr/share/elasticsearch/data - {{- end }} -{{ if .Values.keystore }} - - name: keystore - mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore - subPath: elasticsearch.keystore -{{ end }} - {{- range .Values.secretMounts }} - - name: {{ .name }} - mountPath: {{ .path }} - {{- if .subPath }} - subPath: {{ .subPath }} - {{- end }} - {{- end }} - {{- range $path, $config := .Values.esConfig }} - - name: esconfig - mountPath: /usr/share/elasticsearch/config/{{ $path }} - subPath: {{ $path }} - {{- end -}} - {{- if .Values.extraVolumeMounts }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }} -{{ tpl .Values.extraVolumeMounts . | indent 10 }} - {{- else }} -{{ toYaml .Values.extraVolumeMounts | indent 10 }} - {{- end }} - {{- end }} -{{- if .Values.lifecycle }} - lifecycle: -{{ toYaml .Values.lifecycle | indent 10 }} -{{- end }} - {{- if .Values.extraContainers }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraContainers) }} -{{ tpl .Values.extraContainers . | indent 6 }} - {{- else }} -{{ toYaml .Values.extraContainers | indent 6 }} - {{- end }} - {{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/test/test-elasticsearch-health.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/test/test-elasticsearch-health.yaml deleted file mode 100644 index 704cd3d..0000000 --- a/deployment/deployment/middleware_deployment/elasticsearch/templates/test/test-elasticsearch-health.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -{{- if .Values.tests.enabled -}} -apiVersion: v1 -kind: Pod -metadata: -{{- if .Values.healthNameOverride }} - name: {{ .Values.healthNameOverride | quote }} -{{- else }} - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" -{{- end }} - annotations: - "helm.sh/hook": test - "helm.sh/hook-delete-policy": hook-succeeded -spec: - securityContext: -{{ toYaml .Values.podSecurityContext | indent 4 }} - containers: -{{- if .Values.healthNameOverride }} - - name: {{ .Values.healthNameOverride | quote }} -{{- else }} - - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" -{{- end }} - image: "{{ .Values.image }}:{{ .Values.imageTag }}" - imagePullPolicy: "{{ .Values.imagePullPolicy }}" - command: - - "sh" - - "-c" - - | - #!/usr/bin/env bash -e - curl -XGET --fail '{{ template "elasticsearch.uname" . 
}}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}' - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 4 }} - {{- end }} - restartPolicy: Never -{{- end -}} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/templates/tls-secret.yaml b/deployment/deployment/middleware_deployment/elasticsearch/templates/tls-secret.yaml new file mode 100644 index 0000000..f4f157a --- /dev/null +++ b/deployment/deployment/middleware_deployment/elasticsearch/templates/tls-secret.yaml @@ -0,0 +1,99 @@ +{{- if (include "elasticsearch.createTlsSecret" .) }} +{{- $ca := genCA "elasticsearch-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} + +{{- if not .Values.security.tls.master.existingSecret }} +--- +{{- $fullname := include "elasticsearch.master.fullname" . }} +{{- $serviceName := include "elasticsearch.master.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.master.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: master +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if not .Values.security.tls.data.existingSecret }} +--- +{{- $fullname := include "elasticsearch.data.fullname" . }} +{{- $serviceName := include "elasticsearch.data.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.data.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if not .Values.security.tls.coordinating.existingSecret }} +--- +{{- $fullname := include "elasticsearch.coordinating.fullname" . }} +{{- $serviceName := include "elasticsearch.coordinating.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }}-crt + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if and .Values.ingest.enabled (not .Values.security.tls.ingest.existingSecret) }} +--- +{{- $fullname := include "elasticsearch.ingest.fullname" . }} +{{- $serviceName := include "elasticsearch.ingest.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/deployment/deployment/middleware_deployment/elasticsearch/values.yaml b/deployment/deployment/middleware_deployment/elasticsearch/values.yaml index 6c01d5c..f100302 100644 --- a/deployment/deployment/middleware_deployment/elasticsearch/values.yaml +++ b/deployment/deployment/middleware_deployment/elasticsearch/values.yaml @@ -1,345 +1,1870 @@ ---- -clusterName: "elasticsearch" -nodeGroup: "master" +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass -# The service that non master groups will try to connect to when joining the cluster -# This should be set to clusterName + "-" + nodeGroup for your master group -masterService: "" - -# Elasticsearch roles that will be applied to this nodeGroup -# These will be set as environment variables. E.g. node.master=true -roles: - master: "true" - ingest: "true" - data: "true" - remote_cluster_client: "true" - ml: "true" +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.coordinating.name Coordinating name to be used in the Kibana subchart (service name) +## @param global.kibanaEnabled Whether or not to enable Kibana +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + coordinating: + name: coordinating-only + kibanaEnabled: false -replicas: 3 -minimumMasterNodes: 2 +## @section Common parameters -esMajorVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] -# Allows you to add any config files in /usr/share/elasticsearch/config/ -# such as elasticsearch.yml and log4j2.properties -esConfig: {} -# elasticsearch.yml: | -# key: -# nestedkey: value -# log4j2.properties: | -# key = value +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity -# Extra environment variables to append to this nodeGroup -# This will be appended to the current 'env:' key. You can use any of the kubernetes env -# syntax here -extraEnvs: [] -# - name: MY_ENVIRONMENT_VAR -# value: the_value_goes_here +## @section Elasticsearch parameters -# Allows you to load environment variables from kubernetes secret or config map -envFrom: [] -# - secretRef: -# name: env-secret -# - configMapRef: -# name: config-map +## Bitnami Elasticsearch image version +## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/ +## @param image.registry Elasticsearch image registry +## @param image.repository Elasticsearch image repository +## @param image.tag Elasticsearch image tag (immutable tags are recommended) +## @param image.pullPolicy Elasticsearch image pull policy +## @param image.pullSecrets Elasticsearch image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/elasticsearch + tag: 7.17.2-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
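+  ## (a minimal sketch, assuming a private registry:
+  ##   kubectl create secret docker-registry myRegistryKeySecretName \
+  ##     --docker-server=<registry> --docker-username=<user> --docker-password=<password>)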
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false -# A list of secrets and their paths to mount inside the pod -# This is useful for mounting certificates for security and for mounting -# the X-Pack license -secretMounts: [] -# - name: elastic-certificates -# secretName: elastic-certificates -# path: /usr/share/elasticsearch/config/certs -# defaultMode: 0755 +## X-Pack security parameters +## Note: TLS configuration is required in order to configure password authentication +## +security: + ## @param security.enabled Enable X-Pack Security settings + ## + enabled: false + ## @param security.elasticPassword Password for 'elastic' user + ## Ref: https://github.com/bitnami/bitnami-docker-elasticsearch#security + ## + elasticPassword: "" + ## @param security.existingSecret Name of the existing secret containing the Elasticsearch password + ## + existingSecret: "" + ## FIPS mode + ## @param security.fipsMode Configure elasticsearch with FIPS 140 compliant mode + ## Ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/fips-140-compliance.html + ## + fipsMode: false + ## TLS configuration + ## + tls: + ## @param security.tls.restEncryption Enable SSL/TLS encryption for Elasticsearch REST API. + ## + restEncryption: true + ## @param security.tls.autoGenerated Create self-signed TLS certificates. + ## Note: Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param security.tls.verificationMode Verification mode for SSL communications. + ## Supported values: full, certificate, none. + ## Ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html + ## + verificationMode: "full" + ## @param security.tls.master.existingSecret Existing secret containing the certificates for the master nodes + ## @param security.tls.data.existingSecret Existing secret containing the certificates for the data nodes + ## @param security.tls.ingest.existingSecret Existing secret containing the certificates for the ingest nodes + ## @param security.tls.coordinating.existingSecret Existing secret containing the certificates for the coordinating nodes + ## + master: + existingSecret: "" + data: + existingSecret: "" + ingest: + existingSecret: "" + coordinating: + existingSecret: "" + ## @param security.tls.keystorePassword Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. + ## + keystorePassword: "" + ## @param security.tls.truststorePassword Password to access the JKS/PKCS12 truststore when they are password-protected. + ## + truststorePassword: "" + ## @param security.tls.keystoreFilename Name of the keystore file + ## + keystoreFilename: elasticsearch.keystore.jks + ## @param security.tls.truststoreFilename Name of the truststore + ## + truststoreFilename: elasticsearch.truststore.jks + ## @param security.tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 + ## Ignored when using autoGenerated certs. + ## + usePemCerts: false + ## @param security.tls.keyPassword Password to access the PEM key when they are password-protected. 
+ ## + keyPassword: "" -hostAliases: [] -#- ip: "127.0.0.1" -# hostnames: -# - "foo.local" -# - "bar.local" +## Elasticsearch cluster name +## @param name Elasticsearch cluster name +## +name: elastic +## @param plugins Comma, semi-colon or space separated list of plugins to install at initialization +## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables +## +plugins: "" +## @param snapshotRepoPath File System snapshot repository path +## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables +## +snapshotRepoPath: "" +## @param config Override elasticsearch configuration +## +config: {} +## @param extraConfig Append extra configuration to the elasticsearch node configuration +## Use this instead of `config` to add more configuration +## See below example: +## extraConfig: +## node: +## store: +## allow_mmap: false +## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html +## +extraConfig: {} +## @param extraVolumes A list of volumes to be added to the pod +## Example Use Case: mount ssl certificates when elasticsearch has tls enabled +## extraVolumes: +## - name: es-certs +## secret: +## defaultMode: 420 +## secretName: es-certs +extraVolumes: [] +## @param extraVolumeMounts A list of volume mounts to be added to the pod +## extraVolumeMounts: +## - name: es-certs +## mountPath: /certs +## readOnly: true +extraVolumeMounts: [] +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraEnvVars Array containing extra env vars to be added to all pods (evaluated as a template) +## For example: +## extraEnvVars: +## - name: MY_ENV_VAR +## value: env_var_value +## +extraEnvVars: [] +## @param extraEnvVarsConfigMap ConfigMap containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsConfigMap: "" +## @param extraEnvVarsSecret Secret containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsSecret: "" -image: "docker.elastic.co/elasticsearch/elasticsearch" -imageTag: "7.15.0" -imagePullPolicy: "IfNotPresent" +## @section Master parameters -podAnnotations: - {} - # iam.amazonaws.com/role: es-cluster +## Elasticsearch master-eligible node parameters +## +master: + ## @param master.name Master-eligible node pod name + ## + name: master + ## @param master.fullnameOverride String to fully override elasticsearch.master.fullname template with a string + ## + fullnameOverride: "" + ## @param master.replicas Desired number of Elasticsearch master-eligible nodes. Consider using an odd number of master nodes to prevent "split brain" situation. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html + ## https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html#_even_numbers_of_master_eligible_nodes + ## https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-quorums.html#modules-discovery-quorums + ## + replicas: 3 + ## Update strategy for ElasticSearch master statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param master.updateStrategy.type Update strategy for Master statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param master.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## + ## @param master.heapSize Master-eligible node heap size + ## + heapSize: 128m + ## @param master.podAnnotations Annotations for master-eligible pods. + ## + podAnnotations: {} + ## @param master.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for master-eligible pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param master.securityContext.enabled Enable security context for master-eligible pods + ## @param master.securityContext.fsGroup Group ID for the container for master-eligible pods + ## @param master.securityContext.runAsUser User ID for the container for master-eligible pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for master-eligible pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param master.podSecurityContext.enabled Enable security context for master-eligible pods + ## @param master.podSecurityContext.fsGroup Group ID for the container for master-eligible pods + ## + podSecurityContext: + enabled: false + fsGroup: 1001 + ## Container Security Context for master-eligible pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param master.containerSecurityContext.enabled Enable security context for master-eligible pods + ## @param master.containerSecurityContext.runAsUser User ID for the container for master-eligible pods + ## + containerSecurityContext: + enabled: false + runAsUser: 1001 + ## @param master.podAffinityPreset Master-eligible Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Master-eligible Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset. Allowed values: soft, hard + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param master.nodeAffinityPreset.type Master-eligible Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## @param master.nodeAffinityPreset.key Master-eligible Node label key to match Ignored if `affinity` is set. + ## @param master.nodeAffinityPreset.values Master-eligible Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Master-eligible Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param master.priorityClassName Master pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param master.nodeSelector Master-eligible Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Master-eligible Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch master-eligible container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
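+ ## As a rough, untested sketch, a small production master might pair
+ ## master.heapSize: 512m with:
+ ##   resources:
+ ##     requests:
+ ##       cpu: 500m
+ ##       memory: 1Gi
+ ##     limits:
+ ##       memory: 1Gi
+ ## keeping the JVM heap at roughly half the memory limit, in line with the
+ ## usual Elasticsearch guidance of giving the heap no more than 50% of RAM.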
+ ## @param master.resources.limits The resources limits for the container + ## @param master.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch master-eligible container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param master.startupProbe.enabled Enable/disable the startup probe (master nodes pod) + ## @param master.startupProbe.initialDelaySeconds Delay before startup probe is initiated (master nodes pod) + ## @param master.startupProbe.periodSeconds How often to perform the probe (master nodes pod) + ## @param master.startupProbe.timeoutSeconds When the probe times out (master nodes pod) + ## @param master.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) + ## @param master.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch master-eligible container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param master.livenessProbe.enabled Enable/disable the liveness probe (master-eligible nodes pod) + ## @param master.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (master-eligible nodes pod) + ## @param master.livenessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.livenessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch master-eligible container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param master.readinessProbe.enabled Enable/disable the readiness probe (master-eligible nodes pod) + ## @param master.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (master-eligible nodes pod) + ## @param master.readinessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.readinessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Override default liveness 
probe
+ ##
+ customLivenessProbe: {}
+ ## @param master.customReadinessProbe Override default readiness probe
+ ##
+ customReadinessProbe: {}
+ ## @param master.initContainers Extra init containers to add to the Elasticsearch master-eligible pod(s)
+ ##
+ initContainers: []
+ ## @param master.sidecars Extra sidecar containers to add to the Elasticsearch master-eligible pod(s)
+ ##
+ sidecars: []
+ ## Enable persistence using Persistent Volume Claims
+ ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim`
+ ##
+ enabled: true
+ ## @param master.persistence.storageClass Persistent Volume Storage Class
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param master.persistence.existingClaim Existing Persistent Volume Claim
+ ## If persistence is enabled, and this value is defined,
+ ## then accept the value as an existing Persistent Volume Claim to which
+ ## the container should be bound
+ ##
+ existingClaim: ""
+ ## @param master.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set.
+ ##
+ existingVolume: ""
+ ## @param master.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume`
+ ## selector:
+ ## matchLabels:
+ ## volume:
+ ##
+ selector: {}
+ ## @param master.persistence.annotations Persistent Volume Claim annotations
+ ##
+ annotations: {}
+ ## @param master.persistence.accessModes Persistent Volume Access Modes
+ ##
+ accessModes:
+ - ReadWriteOnce
+ ## @param master.persistence.size Persistent Volume Size
+ ##
+ size: 8Gi
+ ## Service parameters for master-eligible node(s)
+ ##
+ service:
+ ## @param master.service.type Kubernetes Service type (master-eligible nodes)
+ ##
+ type: ClusterIP
+ ## @param master.service.port Kubernetes Service port for Elasticsearch transport port (master-eligible nodes)
+ ##
+ port: 9300
+ ## @param master.service.nodePort Kubernetes Service nodePort (master-eligible nodes)
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ ##
+ nodePort: ""
+ ## @param master.service.annotations Annotations for master-eligible nodes service
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ annotations: {}
+ ## @param master.service.loadBalancerIP loadBalancerIP if master-eligible nodes service type is `LoadBalancer`
+ ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ loadBalancerIP: ""
+ ## Configure the ingress resource that allows you to access the Elasticsearch installation.
+ ## Set up the URL
+ ## ref: https://kubernetes.io/docs/user-guide/ingress/
+ ##
+ ingress:
+ ## @param master.ingress.enabled Enable ingress controller resource
+ ##
+ enabled: false

-# additionals labels
-labels: {}

+ ## @param master.ingress.pathType Ingress Path type
+ ##
+ pathType: ImplementationSpecific
+ ## @param master.ingress.apiVersion Override API Version (automatically detected if not set)
+ ##
+ apiVersion: ""
+ ## @param master.ingress.hostname Default host for the ingress resource.
If specified as "*" no host rule is configured
+ ##
+ hostname: master.local
+ ## @param master.ingress.path The Path to Master. You may need to set this to '/*' in order to use this with ALB ingress controllers.
+ ##
+ path: /
+ ## @param master.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+ ## Use this parameter to set the required annotations for cert-manager, see
+ ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+ ##
+ ## e.g:
+ ## annotations:
+ ##   kubernetes.io/ingress.class: nginx
+ ##   cert-manager.io/cluster-issuer: cluster-issuer-name
+ ##
+ annotations: {}
+ ## @param master.ingress.tls Enable TLS configuration for the hostname defined at master.ingress.hostname parameter
+ ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.master.ingress.hostname }}
+ ## You can use the master.ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
+ ##
+ tls: false
+ ## @param master.ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
+ ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
+ ## extraHosts:
+ ## - name: master.local
+ ##   path: /
+ ##
+ extraHosts: []
+ ## @param master.ingress.extraPaths Additional arbitrary path/backend objects
+ ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
+ ## extraPaths:
+ ## - path: /*
+ ##   backend:
+ ##     serviceName: ssl-redirect
+ ##     servicePort: use-annotation
+ ##
+ extraPaths: []
+ ## @param master.ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+ ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## extraTls: + ## - hosts: + ## - master.local + ## secretName: master.local-tls + ## + extraTls: [] + ## @param master.ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## - name: master.local-tls + ## key: + ## certificate: + ## + secrets: [] + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param master.serviceAccount.create Enable creation of ServiceAccount for the master node + ## + create: false + ## @param master.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + name: "" + ## Autoscaling configuration + ## @param master.autoscaling.enabled Enable autoscaling for master replicas + ## @param master.autoscaling.minReplicas Minimum number of master replicas + ## @param master.autoscaling.maxReplicas Maximum number of master replicas + ## @param master.autoscaling.targetCPU Target CPU utilization percentage for master replica autoscaling + ## @param master.autoscaling.targetMemory Target Memory utilization percentage for master replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" -esJavaOpts: "" # example: "-Xmx1g -Xms1g" +## @section Coordinating parameters -resources: - requests: - cpu: "100m" - memory: "1Gi" - limits: - cpu: "100m" - memory: "1Gi" +## Elasticsearch coordinating-only node parameters +## +coordinating: + ## @param coordinating.fullnameOverride String to fully override elasticsearch.coordinating.fullname template with a string + ## + fullnameOverride: "" + ## @param coordinating.replicas Desired number of Elasticsearch coordinating-only nodes + ## + replicas: 2 + ## @param coordinating.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param coordinating.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Update strategy for ElasticSearch coordinating statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param coordinating.updateStrategy.type Update strategy for Coordinating Statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param coordinating.heapSize Coordinating-only node heap size + ## + heapSize: 128m + ## @param coordinating.podAnnotations Annotations for coordinating pods. + ## + podAnnotations: {} + ## @param coordinating.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for coordinating-only pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.securityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.securityContext.fsGroup Group ID for the container for coordinating-only pods + ## @param coordinating.securityContext.runAsUser User ID for the container for coordinating-only pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.podSecurityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.podSecurityContext.fsGroup Group ID for the container for coordinating-only pods + ## + podSecurityContext: + enabled: false + fsGroup: 1001 + ## Container Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.containerSecurityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.containerSecurityContext.runAsUser User ID for the container for coordinating-only pods + ## + containerSecurityContext: + enabled: false + runAsUser: 1001 + ## @param coordinating.podAffinityPreset Coordinating Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param coordinating.podAntiAffinityPreset Coordinating Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param coordinating.nodeAffinityPreset.type Coordinating Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param coordinating.nodeAffinityPreset.key Coordinating Node label key to match Ignored if `affinity` is set. + ## @param coordinating.nodeAffinityPreset.values Coordinating Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param coordinating.affinity Coordinating Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param coordinating.priorityClassName Coordinating pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param coordinating.nodeSelector Coordinating Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param coordinating.tolerations Coordinating Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param coordinating.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch coordinating-only container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param coordinating.resources.limits The resources limits for the container + ## @param coordinating.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 384Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch coordinating-only container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.startupProbe.enabled Enable/disable the startup probe (coordinating nodes pod) + ## @param coordinating.startupProbe.initialDelaySeconds Delay before startup probe is initiated (coordinating nodes pod) + ## @param coordinating.startupProbe.periodSeconds How often to perform the probe (coordinating nodes pod) + ## @param coordinating.startupProbe.timeoutSeconds When the probe times out (coordinating nodes pod) + ## @param coordinating.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch coordinating-only container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.livenessProbe.enabled Enable/disable the liveness probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (coordinating-only nodes pod) + ## @param 
coordinating.livenessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch coordinating-only container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.readinessProbe.enabled Enable/disable the readiness probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param coordinating.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param coordinating.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param coordinating.initContainers Extra init containers to add to the Elasticsearch coordinating-only pod(s) + ## + initContainers: [] + ## @param coordinating.sidecars Extra sidecar containers to add to the Elasticsearch coordinating-only pod(s) + ## + sidecars: [] + ## Service parameters for coordinating-only node(s) + ## + service: + ## @param coordinating.service.type Kubernetes Service type (coordinating-only nodes) + ## + type: ClusterIP + ## @param coordinating.service.port Kubernetes Service port for REST API (coordinating-only nodes) + ## + port: 9200 + ## @param coordinating.service.nodePort Kubernetes Service nodePort (coordinating-only nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param coordinating.service.annotations Annotations for coordinating-only nodes service + ## Set the LoadBalancer service type to internal only. 
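+ ## E.g., on AWS an internal load balancer is typically requested with an
+ ## annotation along the lines of (provider-specific, shown only as an illustration):
+ ## annotations:
+ ##   service.beta.kubernetes.io/aws-load-balancer-internal: "true"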
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param coordinating.service.loadBalancerIP loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param coordinating.service.externalTrafficPolicy Enable client source IP preservation with externalTrafficPolicy: Local + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param coordinating.serviceAccount.create Enable creation of ServiceAccount for the coordinating-only node + ## + create: false + ## @param coordinating.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## Autoscaling configuration + ## @param coordinating.autoscaling.enabled Enable autoscaling for coordinating replicas + ## @param coordinating.autoscaling.minReplicas Minimum number of coordinating replicas + ## @param coordinating.autoscaling.maxReplicas Maximum number of coordinating replicas + ## @param coordinating.autoscaling.targetCPU Target CPU utilization percentage for coordinating replica autoscaling + ## @param coordinating.autoscaling.targetMemory Target Memory utilization percentage for coordinating replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" -initResources: - {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" +## @section Data parameters -networkHost: "0.0.0.0" +## Elasticsearch data node parameters +## +data: + ## @param data.name Data node pod name + ## + name: data + ## @param data.fullnameOverride String to fully override elasticsearch.data.fullname template with a string + ## + fullnameOverride: "" + ## @param data.replicas Desired number of Elasticsearch data nodes + ## + replicas: 2 + ## @param data.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param data.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Update strategy for ElasticSearch Data statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param data.updateStrategy.type Update strategy for Data statefulset + ## @param data.updateStrategy.rollingUpdatePartition Partition update strategy for Data statefulset + ## + updateStrategy: + type: RollingUpdate + rollingUpdatePartition: "" + ## @param data.heapSize Data node heap size + ## + heapSize: 1024m + ## @param data.podAnnotations Annotations for data pods. + ## + podAnnotations: {} + ## @param data.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for data pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.securityContext.enabled Enable security context for data pods + ## @param data.securityContext.fsGroup Group ID for the container for data pods + ## @param data.securityContext.runAsUser User ID for the container for data pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.podSecurityContext.enabled Enable security context for data pods + ## @param data.podSecurityContext.fsGroup Group ID for the container for data pods + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.containerSecurityContext.enabled Enable security context for data pods + ## @param data.containerSecurityContext.runAsUser User ID for the container for data pods + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param data.podAffinityPreset Data Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param data.podAntiAffinityPreset Data Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset. Allowed values: soft, hard + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param data.nodeAffinityPreset.type Data Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param data.nodeAffinityPreset.key Data Node label key to match Ignored if `affinity` is set. + ## @param data.nodeAffinityPreset.values Data Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] -volumeClaimTemplate: - accessModes: ["ReadWriteOnce"] - storageClassName: "csi-high-perf" + ## @param data.affinity Data Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param data.priorityClassName Data pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param data.nodeSelector Data Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param data.tolerations Data Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param data.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch data container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param data.resources.limits The resources limits for the container + ## @param data.resources.requests [object] The requested resources for the container + ## resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 2176Mi + limits: {} requests: - storage: 10Gi - -rbac: - create: false - serviceAccountAnnotations: {} - serviceAccountName: "" + cpu: 25m + memory: 2048Mi + ## Elasticsearch data container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.startupProbe.enabled Enable/disable the startup probe (data nodes pod) + ## @param data.startupProbe.initialDelaySeconds Delay before startup probe is initiated (data nodes pod) + ## @param data.startupProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.startupProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param data.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch data container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.livenessProbe.enabled Enable/disable the liveness probe (data nodes pod) + ## @param data.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (data nodes pod) + ## @param data.livenessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.livenessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param data.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch data container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.readinessProbe.enabled Enable/disable the readiness probe (data nodes pod) + ## @param data.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (data nodes pod) + ## @param data.readinessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.readinessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param 
data.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded
+ ## @param data.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod)
+ ##
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 90
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ ## @param data.customStartupProbe Override default startup probe
+ ##
+ customStartupProbe: {}
+ ## @param data.customLivenessProbe Override default liveness probe
+ ##
+ customLivenessProbe: {}
+ ## @param data.customReadinessProbe Override default readiness probe
+ ##
+ customReadinessProbe: {}
+ ## @param data.initContainers Extra init containers to add to the Elasticsearch data pod(s)
+ ##
+ initContainers: []
+ ## @param data.sidecars Extra sidecar containers to add to the Elasticsearch data pod(s)
+ ##
+ sidecars: []
+ ## Service parameters for data-eligible node(s)
+ ##
+ service:
+ ## @param data.service.annotations Annotations for data-eligible nodes service
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+ ##
+ annotations: {}
+ ## Enable persistence using Persistent Volume Claims
+ ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+ ##
+ persistence:
+ ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim`
+ ##
+ enabled: true
+ ## @param data.persistence.storageClass Persistent Volume Storage Class
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ ## @param data.persistence.existingClaim Existing Persistent Volume Claim
+ ## If persistence is enabled, and this value is defined,
+ ## then accept the value as an existing Persistent Volume Claim to which
+ ## the container should be bound
+ ##
+ existingClaim: ""
+ ## @param data.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set.
+ ##
+ existingVolume: ""
+ ## @param data.persistence.selector Configure custom selector for existing Persistent Volume.
Overwrites `data.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + selector: {} + ## @param data.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param data.serviceAccount.create Enable creation of ServiceAccount for the data node + ## + create: false + ## @param data.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## Autoscaling configuration + ## @param data.autoscaling.enabled Enable autoscaling for data replicas + ## @param data.autoscaling.minReplicas Minimum number of data replicas + ## @param data.autoscaling.maxReplicas Maximum number of data replicas + ## @param data.autoscaling.targetCPU Target CPU utilization percentage for data replica autoscaling + ## @param data.autoscaling.targetMemory Target Memory utilization percentage for data replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" -podSecurityPolicy: - create: false - name: "" - spec: - privileged: true - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - secret - - configMap - - persistentVolumeClaim - - emptyDir +## @section Ingest parameters -persistence: +## Elasticsearch ingest node parameters +## +ingest: + ## @param ingest.enabled Enable ingest nodes + ## enabled: false - labels: - # Add default labels for the volumeClaimTemplate of the StatefulSet + ## @param ingest.name Ingest node pod name + ## + name: ingest + ## @param ingest.fullnameOverride String to fully override elasticsearch.ingest.fullname template with a string + ## + fullnameOverride: "" + ## @param ingest.replicas Desired number of Elasticsearch ingest nodes + ## + replicas: 2 + ## Update strategy for ElasticSearch ingest statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param ingest.updateStrategy.type Update strategy for Ingest statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param ingest.heapSize Ingest node heap size + ## + heapSize: 128m + ## @param ingest.podAnnotations Annotations for ingest pods. + ## + podAnnotations: {} + ## @param ingest.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param ingest.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param ingest.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for ingest pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.securityContext.enabled Enable security context for ingest pods + ## @param ingest.securityContext.fsGroup Group ID for the container for ingest pods + ## @param ingest.securityContext.runAsUser User ID for the container for ingest pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for ingest pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.podSecurityContext.enabled Enable security context for ingest pods + ## @param ingest.podSecurityContext.fsGroup Group ID for the container for ingest pods + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context for ingest pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.containerSecurityContext.enabled Enable security context for ingest pods + ## @param ingest.containerSecurityContext.runAsUser User ID for the container for ingest pods + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param ingest.podAffinityPreset Ingest Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param ingest.podAntiAffinityPreset Ingest Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## @param ingest.nodeAffinityPreset.type Ingest Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param ingest.nodeAffinityPreset.key Ingest Node label key to match Ignored if `affinity` is set. + ## @param ingest.nodeAffinityPreset.values Ingest Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param ingest.affinity Ingest Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param ingest.priorityClassName Ingest pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param ingest.nodeSelector Ingest Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param ingest.tolerations Ingest Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param ingest.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch ingest container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param ingest.resources.limits The resources limits for the container + ## @param ingest.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 384Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch ingest container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.startupProbe.enabled Enable/disable the startup probe (ingest nodes pod) + ## @param ingest.startupProbe.initialDelaySeconds Delay before startup probe is initiated (ingest nodes pod) + ## @param ingest.startupProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.startupProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + startupProbe: enabled: false - annotations: {} - -extraVolumes: - [] - # - name: extras - # emptyDir: {} - -extraVolumeMounts: - [] - # - name: extras - # mountPath: /usr/share/extras - # readOnly: true - -extraContainers: - [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -extraInitContainers: - [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -# This is the PriorityClass settings as defined in -# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -priorityClassName: "" - -# By default this will make sure two pods don't end up on the same node -# Changing this to a region would allow you to spread pods across regions -antiAffinityTopologyKey: "kubernetes.io/hostname" - -# Hard means that by default pods will only be scheduled if there are enough nodes for them -# and that they will never end up on the same node. Setting this to soft will do this "best effort" -antiAffinity: "hard" - -# This is the node affinity settings as defined in -# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -nodeAffinity: {} - -# The default is to deploy all pods serially. By setting this to parallel all pods are started at -# the same time when bootstrapping the cluster -podManagementPolicy: "Parallel" - -# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when -# there are many services in the current namespace. -# If you experience slow pod startups you probably want to set this to `false`. 
-enableServiceLinks: true - -protocol: http -httpPort: 9200 -transportPort: 9300 - -service: - enabled: true - labels: {} - labelsHeadless: {} - type: ClusterIP - nodePort: "" - annotations: {} - httpPortName: http - transportPortName: transport - loadBalancerIP: "" - loadBalancerSourceRanges: [] - externalTrafficPolicy: "" - -updateStrategy: RollingUpdate - -# This is the max unavailable setting for the pod disruption budget -# The default value of 1 will make sure that kubernetes won't allow more than 1 -# of your pods to be unavailable during maintenance -maxUnavailable: 1 - -podSecurityContext: - fsGroup: 1000 - runAsUser: 1000 - -securityContext: - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# How long to wait for elasticsearch to stop gracefully -terminationGracePeriod: 120 - -sysctlVmMaxMapCount: 262144 + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch ingest container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.livenessProbe.enabled Enable/disable the liveness probe (ingest nodes pod) + ## @param ingest.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (ingest nodes pod) + ## @param ingest.livenessProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.livenessProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch ingest container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.readinessProbe.enabled Enable/disable the readiness probe (ingest nodes pod) + ## @param ingest.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (ingest nodes pod) + ## @param ingest.readinessProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.readinessProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param ingest.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param ingest.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param ingest.initContainers Extra init containers to add to the Elasticsearch ingest pod(s) + ## + initContainers: [] + ## @param ingest.sidecars Extra sidecar containers to add to the Elasticsearch ingest pod(s) + ## + sidecars: [] + ## Service parameters 
for ingest node(s)
+  ##
+  service:
+    ## @param ingest.service.type Kubernetes Service type (ingest nodes)
+    ##
+    type: ClusterIP
+    ## @param ingest.service.port Kubernetes Service port (Elasticsearch transport port, ingest nodes)
+    ##
+    port: 9300
+    ## @param ingest.service.nodePort Kubernetes Service nodePort (ingest nodes)
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    nodePort: ""
+    ## @param ingest.service.annotations Annotations for the ingest nodes service
+    ## Use them e.g. to set a LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## @param ingest.service.loadBalancerIP loadBalancerIP if ingest nodes service type is `LoadBalancer`
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    loadBalancerIP: ""
-readinessProbe:
-  failureThreshold: 3
-  initialDelaySeconds: 10
-  periodSeconds: 10
-  successThreshold: 3
-  timeoutSeconds: 5
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## @param ingest.serviceAccount.create Create a default serviceaccount for the Elasticsearch ingest nodes
+    ##
+    create: false
+    ## @param ingest.serviceAccount.name Name of the created serviceAccount
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    name: ""
-# https://www.elastic.co/guide/en/elasticsearch/reference/7.15/cluster-health.html#request-params wait_for_status
-clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
+## @section Curator parameters
-## Use an alternate scheduler.
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+## Elasticsearch curator parameters
 ##
-schedulerName: ""
+curator:
+  ## @param curator.enabled Enable Elasticsearch Curator cron job
+  enabled: false
+  ## @param curator.name Elasticsearch Curator pod name
+  ##
+  name: curator
+  ## @param curator.image.registry Elasticsearch Curator image registry
+  ## @param curator.image.repository Elasticsearch Curator image repository
+  ## @param curator.image.tag Elasticsearch Curator image tag
+  ## @param curator.image.pullPolicy Elasticsearch Curator image pull policy
+  ## @param curator.image.pullSecrets Elasticsearch Curator image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-curator
+    tag: 5.8.4-debian-10-r304
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## @param curator.cronjob.schedule Schedule for the CronJob
+  ## @param curator.cronjob.annotations Annotations to add to the cronjob
+  ## @param curator.cronjob.concurrencyPolicy `Allow,Forbid,Replace` concurrent jobs
+  ## @param curator.cronjob.failedJobsHistoryLimit Specify the number of failed Jobs to keep
+  ## @param curator.cronjob.successfulJobsHistoryLimit Specify the number of completed Jobs to keep
+  ## @param curator.cronjob.jobRestartPolicy Control the Job restartPolicy
+  ##
+  cronjob:
+    ## At 01:00 every day
+    schedule: "0 1 * * *"
+    annotations: {}
+    concurrencyPolicy: ""
+    failedJobsHistoryLimit: ""
+    successfulJobsHistoryLimit: ""
+    jobRestartPolicy: Never
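+  ## Illustrative override (not a chart default): standard cron syntax applies,
+  ## so the cleanup can run e.g. weekly, at 01:00 on Sundays only:
+  ## e.g:
+  ## cronjob:
+  ##   schedule: "0 1 * * 0"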
+  ## @param curator.schedulerName Name of the k8s scheduler (other than default)
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param curator.podAnnotations Annotations to add to the pod
+  ##
+  podAnnotations: {}
+  ## @param curator.podLabels Extra labels to add to Pod
+  ##
+  podLabels: {}
+  ## @param curator.podAffinityPreset Curator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAffinityPreset: ""
+  ## @param curator.podAntiAffinityPreset Curator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+  ##
+  podAntiAffinityPreset: ""
+  ## Node affinity preset
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+  ## @param curator.nodeAffinityPreset.type Curator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ## @param curator.nodeAffinityPreset.key Curator Node label key to match. Ignored if `affinity` is set.
+  ## @param curator.nodeAffinityPreset.values Curator Node label values to match. Ignored if `affinity` is set.
+  ##
+  nodeAffinityPreset:
+    type: ""
+    ## E.g.
+    ## key: "kubernetes.io/e2e-az-name"
+    ##
+    key: ""
+    ## E.g.
+    ## values:
+    ##   - e2e-az1
+    ##   - e2e-az2
+    ##
+    values: []
+  ## @param curator.initContainers Extra init containers to add to the Elasticsearch curator pod(s)
+  ##
+  initContainers: []
+  ## @param curator.sidecars Extra sidecar containers to add to the Elasticsearch curator pod(s)
+  ##
+  sidecars: []
+  ## @param curator.affinity Curator Affinity for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+  ##
+  affinity: {}
+  ## @param curator.nodeSelector Curator Node labels for pod assignment
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## @param curator.tolerations Curator Tolerations for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
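+  ## Illustrative value (an assumption, not a default): tolerate a dedicated
+  ## node taint so the curator job may also run on tainted Elasticsearch nodes:
+  ## e.g:
+  ## tolerations:
+  ##   - key: "dedicated"
+  ##     operator: "Equal"
+  ##     value: "elasticsearch"
+  ##     effect: "NoSchedule"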
+  ## @param curator.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+  ##
+  topologySpreadConstraints: []
+  ## @param curator.rbac.enabled Enable RBAC resources
+  ##
+  rbac:
+    enabled: false
+  ## @param curator.serviceAccount.create Create a default serviceaccount for elasticsearch curator
+  ## @param curator.serviceAccount.name Name for elasticsearch curator serviceaccount
+  ##
+  serviceAccount:
+    create: true
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    name: ""
+  ## @param curator.psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+  ##
+  psp:
+    create: false
+  ## @param curator.hooks [object] Whether to run job on selected hooks
+  ##
+  hooks:
+    install: false
+    upgrade: false
+  ## @param curator.dryrun Run Curator in dry-run mode
+  ##
+  dryrun: false
+  ## @param curator.command Command to execute
+  ##
+  command: ["curator"]
+  ## @param curator.env Environment variables to add to the cronjob container
+  ##
+  env: {}
+  ## Curator configMaps
+  configMaps:
+    ## @param curator.configMaps.action_file_yml [string] Contents of the Curator action_file.yml
+    ## Delete indices older than 90 days
+    ##
+    action_file_yml: |-
+      ---
+      actions:
+        1:
+          action: delete_indices
+          description: "Clean up ES by deleting old indices"
+          options:
+            timeout_override:
+            continue_if_exception: False
+            disable_action: False
+            ignore_empty_list: True
+          filters:
+            - filtertype: age
+              source: name
+              direction: older
+              timestring: '%Y.%m.%d'
+              unit: days
+              unit_count: 90
+              field:
+              stats_result:
+              epoch:
+              exclude: False
+    ## @param curator.configMaps.config_yml [string] Contents of the Curator config.yml (overrides config)
+    ## Default config (this value is evaluated as a template)
+    ##
+    config_yml: |-
+      ---
+      client:
+        hosts:
+          - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+        port: {{ .Values.coordinating.service.port }}
+        # url_prefix:
+        # use_ssl: True
+        # certificate:
+        # client_cert:
+        # client_key:
+        # ssl_no_validate: True
+        # http_auth:
+        # timeout: 30
+        # master_only: False
+      # logging:
+      #   loglevel: INFO
+      #   logfile:
+      #   logformat: default
+      #   blacklist: ['elasticsearch', 'urllib3']
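+    ## For illustration only (the exact hostname depends on the release name,
+    ## namespace, and clusterDomain; "elasticsearch", "default", and
+    ## "cluster.local" are assumptions here), the templated hosts entry above
+    ## renders to something like:
+    ## hosts:
+    ##   - elasticsearch-coordinating-only.default.svc.cluster.local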
+  ## Curator resources requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## We usually recommend not to specify default resources and to leave this as a conscious
+  ## choice for the user. This also increases chances charts run on environments with little
+  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  ## @param curator.resources.limits The resources limits for the container
+  ## @param curator.resources.requests The requested resources for the container
+  ##
+  resources:
+    ## Example:
+    ## limits:
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    limits: {}
+    ## Examples:
+    ## requests:
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    requests: {}
+  ## @param curator.priorityClassName Curator Pods Priority Class Name
+  ##
+  priorityClassName: ""
+  ## @param curator.extraVolumes Extra volumes
+  ## Example Use Case: mount ssl certificates when elasticsearch has tls enabled
+  ## extraVolumes:
+  ##   - name: es-certs
+  ##     secret:
+  ##       defaultMode: 420
+  ##       secretName: es-certs
+  extraVolumes: []
+  ## @param curator.extraVolumeMounts Mount extra volume(s)
+  ## extraVolumeMounts:
+  ##   - name: es-certs
+  ##     mountPath: /certs
+  ##     readOnly: true
+  extraVolumeMounts: []
+  ## @param curator.extraInitContainers DEPRECATED. Use `curator.initContainers` instead. Init containers to add to the cronjob container
+  ## Don't configure S3 repository till Elasticsearch is reachable.
+  ## Ensure that it is available at http://elasticsearch:9200
+  ##
+  ## elasticsearch-s3-repository:
+  ##   image: bitnami/minideb
+  ##   imagePullPolicy: "IfNotPresent"
+  ##   command:
+  ##     - "/bin/bash"
+  ##     - "-c"
+  ##   args:
+  ##     - |
+  ##       ES_HOST=elasticsearch
+  ##       ES_PORT=9200
+  ##       ES_REPOSITORY=backup
+  ##       S3_REGION=us-east-1
+  ##       S3_BUCKET=bucket
+  ##       S3_BASE_PATH=backup
+  ##       S3_COMPRESS=true
+  ##       S3_STORAGE_CLASS=standard
+  ##       install_packages curl && \
+  ##       ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \
+  ##       cat <<EOF | ...
+  extraInitContainers: []
-lifecycle: {}
-  # preStop:
-  #   exec:
-  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
-  # postStart:
-  #   exec:
-  #     command:
-  #       - bash
-  #       - -c
-  #       - |
-  #         #!/bin/bash
-  #         # Add a template to adjust number of shards/replicas
-  #         TEMPLATE_NAME=my_template
-  #         INDEX_PATTERN="logstash-*"
-  #         SHARD_COUNT=8
-  #         REPLICA_COUNT=1
-  #         ES_URL=http://localhost:9200
-  #         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
-  #         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
+## @section Sysctl Image parameters
-sysctlInitContainer:
+## Kernel settings modifier image
+##
+sysctlImage:
+  ## @param sysctlImage.enabled Enable kernel settings modifier image
+  ##
   enabled: true
+  ## @param sysctlImage.registry Kernel settings modifier image registry
+  ## @param sysctlImage.repository Kernel settings modifier image repository
+  ## @param sysctlImage.tag Kernel settings modifier image tag
+  ## @param sysctlImage.pullPolicy Kernel settings modifier image pull policy
+  ## @param sysctlImage.pullSecrets Kernel settings modifier image pull secrets
+  ##
+  registry: docker.io
+  repository: bitnami/bitnami-shell
+  tag: 10-debian-10-r386
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## e.g:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
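+  ## Context for this image (hedged summary, not a new parameter): Elasticsearch
+  ## typically requires the host setting vm.max_map_count >= 262144 (compare the
+  ## removed sysctlVmMaxMapCount default above); a manual equivalent on a node
+  ## would be e.g.:
+  ## sysctl -w vm.max_map_count=262144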
+  ## Init containers' resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## We usually recommend not to specify default resources and to leave this as a conscious
+  ## choice for the user. This also increases chances charts run on environments with little
+  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  ## @param sysctlImage.resources.limits The resources limits for the container
+  ## @param sysctlImage.resources.requests The requested resources for the container
+  ##
+  resources:
+    ## Example:
+    ## limits:
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    limits: {}
+    ## Examples:
+    ## requests:
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    requests: {}
-keystore: []
-
-networkPolicy:
-  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
-  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
-  ## {{ template "uname" . }}-client: "true"
-  ## Example for default configuration to access HTTP port:
-  ## elasticsearch-master-http-client: "true"
-  ## Example for default configuration to access transport port:
-  ## elasticsearch-master-transport-client: "true"
-
-  http:
-    enabled: false
-    ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
-    ## and matching all criteria can reach the DB.
-    ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
-    ## parameter to select these namespaces
-    ##
-    # explicitNamespacesSelector:
-    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
-    #   matchLabels:
-    #     role: frontend
-    #   matchExpressions:
-    #     - {key: role, operator: In, values: [frontend]}
-    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
-    ##
-    # additionalRules:
-    #   - podSelector:
-    #       matchLabels:
-    #         role: frontend
-    #   - podSelector:
-    #       matchExpressions:
-    #         - key: role
-    #           operator: In
-    #           values:
-    #             - frontend
+## @section VolumePermissions parameters
-  transport:
-    ## Note that all Elasticsearch Pods can talks to themselves using transport port even if enabled.
-    enabled: false
-    # explicitNamespacesSelector:
-    #   matchLabels:
-    #     role: frontend
-    #   matchExpressions:
-    #     - {key: role, operator: In, values: [frontend]}
-    # additionalRules:
-    #   - podSelector:
-    #       matchLabels:
-    #         role: frontend
-    #   - podSelector:
-    #       matchExpressions:
-    #         - key: role
-    #           operator: In
-    #           values:
-    #             - frontend
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
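+## Illustrative use (an assumption, not a chart default): storage backends that
+## squash ownership, e.g. some NFS provisioners, may need this init container:
+## e.g:
+## volumePermissions:
+##   enabled: true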
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image name
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 10-debian-10-r386
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init containers' resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## We usually recommend not to specify default resources and to leave this as a conscious
+  ## choice for the user. This also increases chances charts run on environments with little
+  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  ## @param volumePermissions.resources.limits The resources limits for the container
+  ## @param volumePermissions.resources.requests The requested resources for the container
+  ##
+  resources:
+    ## Example:
+    ## limits:
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    limits: {}
+    ## Examples:
+    ## requests:
+    ##   cpu: 100m
+    ##   memory: 128Mi
+    requests: {}
-tests:
-  enabled: true
+## @section Kibana Parameters
-# Deprecated
-# please use the above podSecurityContext.fsGroup instead
-fsGroup: ""
+## Bundled Kibana parameters
+## @param kibana.elasticsearch.hosts [array] Array containing hostnames for the ES instances. Used to generate the URL
+## @param kibana.elasticsearch.port Port to connect Kibana and ES instance. Used to generate the URL
+##
+kibana:
+  elasticsearch:
+    hosts:
+      - '{{ include "elasticsearch.coordinating.fullname" . }}'
+    port: 9200
diff --git a/deployment/deployment/middleware_deployment/minio/values.yaml b/deployment/deployment/middleware_deployment/minio/values.yaml
index c2557e4..00199a9 100755
--- a/deployment/deployment/middleware_deployment/minio/values.yaml
+++ b/deployment/deployment/middleware_deployment/minio/values.yaml
@@ -94,7 +94,7 @@ tls:
 ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
 ##
 persistence:
-  enabled: false
+  enabled: true
 
 ## A manually managed Persistent Volume and Claim
 ## Requires persistence.enabled: true
@@ -110,7 +110,7 @@ persistence:
 ##
 ## Storage class of PV to bind. By default it looks for standard storage class.
 ## If the PV uses a different storage class, specify that here.
- storageClass: "csi-high-perf" + storageClass: "" VolumeName: "" accessMode: ReadWriteOnce size: 10Gi diff --git a/deployment/deployment/middleware_deployment/mongodb/values.yaml b/deployment/deployment/middleware_deployment/mongodb/values.yaml index efdf258..51ffd1b 100644 --- a/deployment/deployment/middleware_deployment/mongodb/values.yaml +++ b/deployment/deployment/middleware_deployment/mongodb/values.yaml @@ -282,7 +282,7 @@ extraVolumeMountsArbiter: [] ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: - enabled: false + enabled: true ## A manually managed Persistent Volume and Claim ## Requires persistence.enabled: true ## If defined, PVC must be created manually before volume will be bound @@ -306,7 +306,7 @@ persistence: ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## - storageClass: "csi-high-perf" + storageClass: "" accessModes: - ReadWriteOnce size: 10Gi diff --git a/deployment/deployment/middleware_deployment/mysql/values.yaml b/deployment/deployment/middleware_deployment/mysql/values.yaml index c625e17..fdf3c5f 100644 --- a/deployment/deployment/middleware_deployment/mysql/values.yaml +++ b/deployment/deployment/middleware_deployment/mysql/values.yaml @@ -102,7 +102,7 @@ readinessProbe: ## Persist data to a persistent volume persistence: - enabled: false + enabled: true ## database data Persistent Volume Storage Class ## If defined, storageClassName: ## If set to "-", storageClassName: "", which disables dynamic provisioning @@ -110,7 +110,7 @@ persistence: ## set, choosing the default provisioner. (gp2 on AWS, standard on ## GKE, AWS & OpenStack) ## - storageClass: "csi-high-perf" + storageClass: "" accessMode: ReadWriteOnce size: 10Gi annotations: {} diff --git a/deployment/deployment/quanxiang_charts/builder/templates/pipeline/register-api.yaml b/deployment/deployment/quanxiang_charts/builder/templates/pipeline/register-api.yaml index f0dac35..787ecb8 100644 --- a/deployment/deployment/quanxiang_charts/builder/templates/pipeline/register-api.yaml +++ b/deployment/deployment/quanxiang_charts/builder/templates/pipeline/register-api.yaml @@ -39,7 +39,7 @@ spec: - name: url value: $(params.SOURCE_URL) - name: gitInitImage - value: wentevill/grc.io.git-init:v0.29.0 + value: docker.io/quanxiang/grc.io.git-init:v0.29.0 - name: PROJECT_NAME value: "$(params.PROJECT_NAME)" - name: PROJECT_TITLE diff --git a/deployment/deployment/quanxiang_charts/builder/values.yaml b/deployment/deployment/quanxiang_charts/builder/values.yaml index b0b6b27..111eab6 100644 --- a/deployment/deployment/quanxiang_charts/builder/values.yaml +++ b/deployment/deployment/quanxiang_charts/builder/values.yaml @@ -35,7 +35,7 @@ serviceAccount: podSecurityContext: {} securityContext: {} dockerConfig: - name: faas-harbor + name: faas-docker data: gitSSH: diff --git a/deployment/deployment/quanxiang_charts/faas/templates/configmap.yaml b/deployment/deployment/quanxiang_charts/faas/templates/configmap.yaml index e1b3f48..c5ead91 100644 --- a/deployment/deployment/quanxiang_charts/faas/templates/configmap.yaml +++ b/deployment/deployment/quanxiang_charts/faas/templates/configmap.yaml @@ -84,7 +84,7 @@ data: - step-results build-images: - go1.16: qxcr.xyz/faas/builder-go:v1alpha1.0.1 + go1.16: docker.io/quanxiang/builder-go:v1.16 templates: - full_name: handle.go @@ -97,7 +97,7 @@ data: "fmt" "net/http" - "github.com/quanxiang-cloud/faas-{{.}}-interface/{{.}}" + "github.com/quanxiang-cloud/faas-lowcode-interface/lowcode" ) type Request 
struct { @@ -118,7 +118,7 @@ data: // @Router / [post] func Handle(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - _, ok := ctx.Value({{.}}.LOWCODE).(lowcode.Lowcode) + _, ok := ctx.Value(lowcode.LOWCODE).(lowcode.Lowcode) if !ok { fmt.Printf("error") return diff --git a/deployment/deployment/quanxiang_charts/faas/templates/kafka.yaml b/deployment/deployment/quanxiang_charts/faas/templates/kafka.yaml index 38ebd68..f77e6f6 100644 --- a/deployment/deployment/quanxiang_charts/faas/templates/kafka.yaml +++ b/deployment/deployment/quanxiang_charts/faas/templates/kafka.yaml @@ -14,6 +14,6 @@ spec: - name: authType # Required. value: "none" - name: maxMessageBytes - value: 1024 + value: 1024000 - name: disableTls value: "true" \ No newline at end of file diff --git a/deployment/deployment/quanxiang_charts/faas/values.yaml b/deployment/deployment/quanxiang_charts/faas/values.yaml index a46d7a6..b9ce392 100644 --- a/deployment/deployment/quanxiang_charts/faas/values.yaml +++ b/deployment/deployment/quanxiang_charts/faas/values.yaml @@ -5,6 +5,8 @@ image: namespace: "" domain: example.com mongo_host: "" +code: + import: github.com/quanxiang-cloud/faas-lowcode-interface/lowcode args: enabled: true endpoint: example.com:31198 diff --git a/deployment/deployment/quanxiang_charts/fileserver/templates/ingress.yaml b/deployment/deployment/quanxiang_charts/fileserver/templates/ingress.yaml index 5fb1a8f..c3dc02c 100644 --- a/deployment/deployment/quanxiang_charts/fileserver/templates/ingress.yaml +++ b/deployment/deployment/quanxiang_charts/fileserver/templates/ingress.yaml @@ -4,6 +4,7 @@ metadata: name: fileserver namespace: {{ .Values.namespace }} annotations: + kubernetes.io/ingress.class: nginx nginx.ingress.kubernetes.io/proxy-body-size: 30m spec: # tls: diff --git a/deployment/deployment/quanxiang_charts/fluent-bit/templates/daemonset.yaml b/deployment/deployment/quanxiang_charts/fluent-bit/templates/daemonset.yaml index 5a7aae2..b06728b 100755 --- a/deployment/deployment/quanxiang_charts/fluent-bit/templates/daemonset.yaml +++ b/deployment/deployment/quanxiang_charts/fluent-bit/templates/daemonset.yaml @@ -26,7 +26,7 @@ spec: {{ toYaml .Values.podLabels | indent 8 }} {{- end }} annotations: - checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/fluent-bit-secret.yaml") . | sha256sum }} {{- if .Values.podAnnotations }} {{ toYaml .Values.podAnnotations | indent 8 }} {{- end }} @@ -191,8 +191,8 @@ spec: type: DirectoryOrCreate {{- end }} - name: config - configMap: - name: {{ if .Values.existingConfigMap }}{{ .Values.existingConfigMap }}{{- else }}{{ template "fluent-bit.fullname" . }}-config{{- end }} + secret: + secretName: {{ template "fluent-bit.fullname" . 
}}-config {{- if .Values.extraVolumes }} {{ toYaml .Values.extraVolumes | indent 6 }} {{- end }} diff --git a/deployment/deployment/quanxiang_charts/fluent-bit/templates/config.yaml b/deployment/deployment/quanxiang_charts/fluent-bit/templates/fluent-bit-secret.yaml similarity index 53% rename from deployment/deployment/quanxiang_charts/fluent-bit/templates/config.yaml rename to deployment/deployment/quanxiang_charts/fluent-bit/templates/fluent-bit-secret.yaml index 917ece7..a5f75be 100755 --- a/deployment/deployment/quanxiang_charts/fluent-bit/templates/config.yaml +++ b/deployment/deployment/quanxiang_charts/fluent-bit/templates/fluent-bit-secret.yaml @@ -1,19 +1,13 @@ -{{- if (empty .Values.existingConfigMap) -}} +{{- if (empty .Values.existingSecret) -}} apiVersion: v1 -kind: ConfigMap +kind: Secret metadata: name: {{ template "fluent-bit.fullname" . }}-config - labels: - app: {{ template "fluent-bit.name" . }} - chart: {{ .Chart.Name }}-{{ .Chart.Version }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -data: - fluent-bit-service.conf: | +type: Opaque +stringData: + fluent-bit.conf: | [Service] Parsers_File parsers.conf - - fluent-bit-input.conf: | [Input] Name tail Path /var/log/containers/*_builder_*.log @@ -25,8 +19,6 @@ data: Mem_Buf_Limit 5MB Parser docker Tag kube.* - - fluent-bit-filter.conf: | [Filter] Name kubernetes Match kube.* @@ -66,75 +58,30 @@ data: Match kube_events Key_Name log Parser json - - - fluent-bit-output.conf: | [Output] Name es Match_Regex (?:kube|service)\.(.*) - Host {{ .Values.elastic.host }} - Port {{ .Values.elastic.port }} + Host {{ .Values.backend.es.host }} + Port {{ .Values.backend.es.port }} Logstash_Format true Logstash_Prefix builder-log Time_Key @timestamp - - - - fluent-bit.conf: | -{{ .Values.rawConfig | indent 4 }} - - parsers.conf: | -{{- if .Values.parsers.regex }} -{{- range .Values.parsers.regex }} - [PARSER] - Name {{ .name }} - Format regex - Regex {{ .regex }} -{{- if .timeKey }} - Time_Key {{ .timeKey }} -{{- end }} -{{- if .timeFormat }} - Time_Format {{ .timeFormat }} -{{- end }} -{{ end }} -{{- end }} -{{- if .Values.parsers.json }} -{{- range .Values.parsers.json }} - [PARSER] - Name {{ .name }} - Format json -{{- if .timeKeep }} - Time_Keep {{ .timeKeep }} -{{- end }} -{{- if .timeKey }} - Time_Key {{ .timeKey }} -{{- end }} -{{- if .timeFormat }} - Time_Format {{ .timeFormat }} -{{- end }} -{{- if .decodeFieldAs }} - Decode_Field_As {{ .decodeFieldAs }} {{ .decodeField | default "log" }} -{{- end}} -{{- if .extraEntries }} -{{ .extraEntries | indent 8 }} -{{- end }} -{{ end }} -{{- end }} -{{- if .Values.parsers.logfmt }} -{{- range .Values.parsers.logfmt }} - [PARSER] - Name {{ .name }} - Format logfmt -{{- if .timeKey }} - Time_Key {{ .timeKey }} -{{- end }} -{{- if .timeFormat }} - Time_Format {{ .timeFormat }} -{{- end }} -{{- if .extraEntries }} -{{ .extraEntries | indent 8 }} -{{- end }} -{{ end }} -{{- end }} - -{{- end -}} + systemd.lua: | + function add_time(tag, timestamp, record) + new_record = {} + timeStr = os.date("!*t", timestamp["sec"]) + t = string.format("%4d-%02d-%02dT%02d:%02d:%02d.%sZ", + timeStr["year"], timeStr["month"], timeStr["day"], + timeStr["hour"], timeStr["min"], timeStr["sec"], + timestamp["nsec"]) + kubernetes = {} + kubernetes["pod_name"] = record["_HOSTNAME"] + kubernetes["container_name"] = record["SYSLOG_IDENTIFIER"] + kubernetes["namespace_name"] = "kube-system" + new_record["time"] = t + new_record["log"] = record["MESSAGE"] + new_record["kubernetes"] = kubernetes + 
+            return 1, timestamp, new_record
+        end
+    parsers.conf: ''
+{{- end }}
\ No newline at end of file
diff --git a/deployment/deployment/quanxiang_charts/polyapi/templates/ingress.yaml b/deployment/deployment/quanxiang_charts/polyapi/templates/ingress.yaml
index c82bf8f..2d067ef 100644
--- a/deployment/deployment/quanxiang_charts/polyapi/templates/ingress.yaml
+++ b/deployment/deployment/quanxiang_charts/polyapi/templates/ingress.yaml
@@ -3,6 +3,9 @@ apiVersion: networking.k8s.io/v1
 metadata:
   name: polyapi-qxp
   namespace: {{ .Values.namespace }}
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/proxy-body-size: 30m
 spec:
   rules:
 {{- range .Values.ingress.hosts }}
diff --git a/deployment/deployment/quanxiang_charts/polygate/templates/ingress.yaml b/deployment/deployment/quanxiang_charts/polygate/templates/ingress.yaml
index 3f75948..ff478c6 100644
--- a/deployment/deployment/quanxiang_charts/polygate/templates/ingress.yaml
+++ b/deployment/deployment/quanxiang_charts/polygate/templates/ingress.yaml
@@ -5,6 +5,9 @@ metadata:
   namespace: {{ .Values.namespace }}
   labels:
     app.kubernetes.io/version: v1
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/proxy-body-size: 30m
 spec:
   rules:
 {{- range .Values.ingress.hosts }}
diff --git a/deployment/deployment/schemas/flow.sql b/deployment/deployment/schemas/flow.sql
index 9d1c737..2121ede 100644
--- a/deployment/deployment/schemas/flow.sql
+++ b/deployment/deployment/schemas/flow.sql
@@ -300,6 +300,21 @@ CREATE TABLE `instance_execution` (
 SET FOREIGN_KEY_CHECKS = 1;
 
+
+DROP TABLE IF EXISTS `flow_process_relation`;
+CREATE TABLE `flow_process_relation`
+(
+    `id` varchar(40) NOT NULL DEFAULT '' COMMENT 'flow id',
+    `bpmn_text` text NOT NULL COMMENT 'BPMN XML content of the flow',
+    `flow_id` varchar(40) NOT NULL DEFAULT '' COMMENT 'flowID',
+    `process_id` varchar(40) NOT NULL DEFAULT '' COMMENT 'id of the flow in the process engine',
+    `creator_id` varchar(40) NOT NULL DEFAULT '' COMMENT 'creator',
+    `create_time` varchar(40) DEFAULT NULL COMMENT 'creation time',
+    `modifier_id` varchar(40) NOT NULL DEFAULT '' COMMENT 'last modifier',
+    `modify_time` varchar(40) DEFAULT NULL COMMENT 'last modified time',
+    PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='flow-process relation table';
+
 INSERT INTO flow_variables (id,flow_id,name,`type`,code,field_type,format,default_value,`desc`,creator_id,create_time,modifier_id,modify_time) VALUES
 ('1','0','流程发起人','SYSTEM','flowVar_instanceCreatorName','string','','','','0','2021-09-14T14:30:18+0000','0','2021-09-14T14:30:18+0000'),
 ('2','0','流程发起时间','SYSTEM','flowVar_instanceCreateTime','datetime','','','','0','2021-09-14T14:30:18+0000','0','2021-09-14T14:30:18+0000'),
@@ -316,4 +331,4 @@ alter table flow_variables
 alter table flow_instance_variables
     modify code varchar(200) default '' not null comment '变量标识';
 
-alter table flow_instance add request_id varchar(200) null;
\ No newline at end of file
+alter table flow_instance add request_id varchar(200) null;
diff --git a/deployment/go.mod b/deployment/go.mod
index 1171445..1fc05fc 100644
--- a/deployment/go.mod
+++ b/deployment/go.mod
@@ -6,15 +6,18 @@ require (
 	github.com/containerd/containerd v1.6.2 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/docker/docker v20.10.14+incompatible
-	github.com/go-sql-driver/mysql v1.6.0
+	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+	github.com/onsi/ginkgo v1.16.5 // indirect
+	github.com/onsi/gomega v1.18.1 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect github.com/satori/go.uuid v1.2.0 github.com/spf13/cobra v1.4.0 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd + google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.1.0 // indirect - k8s.io/api v0.24.2 k8s.io/apimachinery v0.24.2 k8s.io/client-go v0.24.2 - sigs.k8s.io/controller-runtime v0.12.3 + sigs.k8s.io/yaml v1.3.0 ) diff --git a/deployment/go.sum b/deployment/go.sum index 05791c6..0aa1150 100644 --- a/deployment/go.sum +++ b/deployment/go.sum @@ -101,7 +101,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -110,11 +109,9 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= @@ -122,7 +119,6 @@ github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edY github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -135,10 +131,8 @@ github.com/cenkalti/backoff/v4 v4.1.2/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= @@ -336,10 +330,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -375,8 +367,6 @@ github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -394,8 +384,6 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= @@ -414,13 +402,11 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -452,8 +438,6 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -494,7 +478,6 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -556,7 +539,6 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/intel/goresctrl v0.2.0/go.mod 
h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= @@ -585,7 +567,6 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -614,7 +595,6 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -628,7 +608,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= @@ -732,7 +711,6 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod 
h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -744,13 +722,10 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -760,8 +735,6 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -773,7 +746,6 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -809,7 +781,6 @@ github.com/soheilhy/cmux 
v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -874,7 +845,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -884,12 +854,9 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= @@ -924,20 +891,13 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod 
h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -947,7 +907,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -955,7 +914,6 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -992,7 +950,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1047,7 +1004,6 @@ 
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1173,11 +1129,9 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1270,14 +1224,11 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1345,7 +1296,6 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1460,8 +1410,6 @@ k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= @@ -1473,7 +1421,6 @@ k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= @@ -1481,13 +1428,10 @@ k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= -k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -1497,7 
+1441,6 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -1524,9 +1467,6 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/deployment/pkg/configMysql.go b/deployment/pkg/configMysql.go index df6618a..a5cf73f 100644 --- a/deployment/pkg/configMysql.go +++ b/deployment/pkg/configMysql.go @@ -75,7 +75,7 @@ func deployMysql(kubeconfig, namespace, sqlName, depPath string, configs *Config mysqlUserPass := configs.Config.Mysql.Password for _, pod := range pods.Items { if strings.Contains(pod.Name, "mysql") { - command := "kubectl exec -it -n " + namespace + " --kubeconfig " + kubeconfig + " " + pod.Name + " -- mysql -h" + mysqlAddress + " -u" + mysqlUserName + " -p" + mysqlUserPass + " -P" + mysqlPort + " --default-character-set=utf8 < " + "./deployment/schemas/" + sqlName + command := "kubectl exec -it -n " + namespace + " --kubeconfig " + kubeconfig + " " + pod.Name + " -- mysql -h" + mysqlAddress + " -u" + mysqlUserName + " -p" + mysqlUserPass + " -P" + mysqlPort + " --default-character-set=utf8 < " + sqlName err := execBash(command) if err != nil { return err diff --git a/deployment/pkg/configs.go b/deployment/pkg/configs.go index 6f25db4..13413d4 100644 --- a/deployment/pkg/configs.go +++ b/deployment/pkg/configs.go @@ -94,10 +94,12 @@ type Configs struct { // Faas type Git struct { - KnownHosts string `yaml:"known_hosts"` - Privatekey string `yaml:"privatekey"` - GitSSh string `yaml:"gitSSh"` - Token string `yaml:"token"` + Host string `yaml:"host"` + KnownHostsScan string `yaml:"known_hosts_scan"` + SSHPrivatekey string `yaml:"sshPrivatekey"` + GitSSHAddress string `yaml:"gitSSHAddress"` + GitSSHPort int `yaml:"gitSSHPort"` + Token string `yaml:"token"` } // Faas @@ -108,9 +110,10 @@ type Faas struct { // Docker type Docker struct { - Server string `yaml:"server"` - Name string `yaml:"name"` - Pass string `yaml:"pass"` + Host 
string `yaml:"host"` + NameSpace string `yaml:"nameSpace"` + User string `yaml:"user"` + Pass string `yaml:"pass"` } type Persis struct { diff --git a/deployment/pkg/installFaas.go b/deployment/pkg/installFaas.go index 8a6f1b7..d7981ce 100644 --- a/deployment/pkg/installFaas.go +++ b/deployment/pkg/installFaas.go @@ -1,105 +1,117 @@ package pkg import ( - "context" + "encoding/base64" "errors" "fmt" - "path/filepath" + "os" + "strings" - _ "github.com/go-sql-driver/mysql" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/homedir" - ctrl "sigs.k8s.io/controller-runtime" + "gopkg.in/yaml.v2" ) -type Gits struct { - ID string `gorm:"column:id;type:varchar(64);PRIMARY_KEY" json:"id"` - Host string `gorm:"column:host;type:varchar(200);" json:"host"` - KnownHosts string `gorm:"column:known_hosts;type:text;" json:"knownHosts"` - SSH string `gorm:"column:ssh;type:text;" json:"ssh"` - Token string `gorm:"column:token;type:text;" json:"token"` - Name string `gorm:"column:name;type:varchar(200);" json:"name"` - - CreatedAt int64 `gorm:"column:created_at;type:bigint; " json:"createdAt,omitempty" ` - UpdatedAt int64 `gorm:"column:updated_at;type:bigint; " json:"updatedAt,omitempty" ` - DeletedAt int64 `gorm:"column:deleted_at;type:bigint; " json:"deletedAt,omitempty" ` - CreatedBy string `gorm:"column:created_by;type:varchar(64); " json:"createdBy,omitempty"` //创建者 - UpdatedBy string `gorm:"column:updated_by;type:varchar(64); " json:"updatedBy,omitempty"` //创建者 - DeletedBy string `gorm:"column:deleted_by;type:varchar(64); " json:"deletedBy,omitempty"` //删除者 - TenantID string `gorm:"column:tenant_id;type:varchar(64); " json:"tenantID"` //租户id +type SecretSSH struct { + Kind string `yaml:"kind"` // Secret + ApiVersion string `yaml:"apiVersion"` // v1 + Data SSHData `yaml:"data"` + Meta MetaData `yaml:"metadata"` + Type string `yaml:"type"` //kubernetes.io/ssh-auth } -type Dockers struct { - ID string `gorm:"column:id;type:varchar(64);PRIMARY_KEY" json:"id"` - Host string `gorm:"column:host;type:varchar(200);" json:"host"` - UserName string `gorm:"column:user_name;type:varchar(64);" json:"userName"` - NameSpace string `gorm:"column:name_space;type:varchar(64);" json:"nameSpace"` - Secret string `gorm:"column:secret;type:text;" json:"secret"` - Name string `gorm:"column:name;type:varchar(64);" json:"name"` - CreatedAt int64 `gorm:"column:created_at;type:bigint; " json:"createdAt,omitempty" ` - UpdatedAt int64 `gorm:"column:updated_at;type:bigint; " json:"updatedAt,omitempty" ` - DeletedAt int64 `gorm:"column:deleted_at;type:bigint; " json:"deletedAt,omitempty" ` - CreatedBy string `gorm:"column:created_by;type:varchar(64); " json:"createdBy,omitempty"` //创建者 - UpdatedBy string `gorm:"column:updated_by;type:varchar(64); " json:"updatedBy,omitempty"` //创建者 - DeletedBy string `gorm:"column:deleted_by;type:varchar(64); " json:"deletedBy,omitempty"` //删除者 - TenantID string `gorm:"column:tenant_id;type:varchar(64); " json:"tenantID"` //租户id +type SSHAnnota struct { + Token string `yaml:"tekton.dev/git-0"` // 'http://192.168.208.51:8080' +} +type MetaData struct { + Name string `yaml:"name" json:"name,omitempty"` // rsa + NameSpace string `yaml:"namespace" json:"name_space,omitempty"` //builder + Annotations SSHAnnota `yaml:"annotations" json:"annotations,omitempty"` +} +type SSHData struct { + KnownHosts string `yaml:"known_hosts"` //使用 ssh-keyscan -p 22端口 gitlab域名或者ip |base64 -w 0 生成 + 
-func applyGitSeret(host, know_host, ssh, kubeconfig, namespace string) error {
-	if kubeconfig == "" || kubeconfig == "~/.kube/config" {
-		if home := homedir.HomeDir(); home != "" {
-			kubeconfig = filepath.Join(home, ".kube", "config")
-		} else {
-			fmt.Println("------- please pass the -k flag to supply the kubeconfig path")
-			return errors.New("NO_KUBECONFIG")
-		}
-	}
-	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
-	if err != nil {
-		panic(err.Error())
-	}
-
-	// create the clientset
-	clientset, err := kubernetes.NewForConfig(config)
-	if err != nil {
-		panic(err.Error())
-	}
-	secret := clientset.CoreV1().Secrets(namespace)
-	data := make(map[string][]byte)
-	// ssh-keyscan github.com | base64
-	data["known_hosts"] = []byte(know_host)
-	data["ssh-privatekey"] = []byte(ssh)
-
-	tekton := make(map[string]string)
-	tekton["tekton.dev/git-0"] = host
-
-	s := &v1.Secret{
-		Type: v1.SecretTypeSSHAuth,
-		ObjectMeta: ctrl.ObjectMeta{
-			Name:        "rsa",
-			Namespace:   namespace,
-			Annotations: tekton,
-		},
-		Data: data,
-	}
-	options := metav1.CreateOptions{}
-	_, err = secret.Create(context.Background(), s, options)
-	if err != nil {
-		return err
-	}
-	return err
-}
+func InitFaas(kubeconfig, namespace, depPath string, configs *Configs) error {
+	sqlFile := "./initfaas.sql"
+	knownHosts := fmt.Sprintf("ssh://git@%s:%d/", configs.Faas.Git.GitSSHAddress, configs.Faas.Git.GitSSHPort)
+	knownHostsScan := fmt.Sprintf("ssh://%s:%d/", configs.Faas.Git.GitSSHAddress, configs.Faas.Git.GitSSHPort)
+	sshKey := DecodeBase64String(configs.Faas.Git.SSHPrivatekey)
+	if sshKey == "" {
+		return errors.New("failed to decode ssh private key")
+	}
+	gitSql := fmt.Sprintf("insert into gits (id, host, token, name, known_hosts, key_scan_known_hosts, ssh) values('%s', '%s', '%s', '%s', '%s', '%s', '%s');",
+		"mzHjx1QR", configs.Faas.Git.Host, configs.Faas.Git.Token, "rsa", knownHosts, knownHostsScan, sshKey)
+	dockerSql := fmt.Sprintf("insert into dockers (id, host, user_name, name_space, secret, name) values('aZhvb2qR', '%s', '%s', '%s', '%s', '%s');",
+		configs.Faas.Docker.Host, configs.Faas.Docker.User, configs.Faas.Docker.NameSpace, configs.Faas.Docker.Pass, "faas-docker")
+	dbUse := "USE faas;\n"
+	_, err := os.Stat(sqlFile)
+	if err == nil {
+		_ = os.Remove(sqlFile)
+	}
+	f, err := os.OpenFile(sqlFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	str := fmt.Sprintf("%s\n%s\n%s\n", dbUse, gitSql, dockerSql)
+	_, err = f.Write([]byte(str))
+	if err != nil {
+		return err
+	}
+	// seed the faas database with the configured git and docker registry records
+	err = deployMysql(kubeconfig, namespace, sqlFile, depPath, configs)
+	if err != nil {
+		return err
+	}
+	return err
+}
+
+func applyGitSecret(host, known_hosts, ssh, kubeconfig, namespace string) error {
+	var sshSecret SecretSSH
+	yamlFile := "./secret.yaml"
+
+	sshSecret.ApiVersion = "v1"
+	sshSecret.Kind = "Secret"
+	sshSecret.Type = "kubernetes.io/ssh-auth"
+	sshSecret.Data.KnownHosts = known_hosts
+	sshSecret.Meta.Annotations.Token = host
+	sshSecret.Meta.Name = "rsa"
+	sshSecret.Meta.NameSpace = namespace
+	sshSecret.Data.SSHPrivatekey = ssh
+
+	sshBytes, err := yaml.Marshal(&sshSecret)
+	if err != nil {
+		fmt.Println(err)
+		return err
+	}
+	_, err = os.Stat(yamlFile)
+	if err == nil {
+		_ = os.Remove(yamlFile)
+	}
+	f, err := os.OpenFile(yamlFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.Write(sshBytes)
+	if err != nil {
+		fmt.Println(err)
+		return err
+	}
+	command := fmt.Sprintf("kubectl apply -f %s --kubeconfig %s", yamlFile, kubeconfig)
+	err = execBash(command)
+	if err != nil {
+		return err
+	}
+	return err
+}
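InitFaas splices configuration values into the INSERT statements verbatim, so a token or key containing a single quote would break the generated SQL. A minimal hardening sketch, using a hypothetical escapeSQLString helper that is not defined in this package:

	// escapeSQLString is a hypothetical helper: it doubles single quotes so an
	// interpolated value cannot terminate the surrounding SQL string literal.
	func escapeSQLString(s string) string {
		return strings.ReplaceAll(s, "'", "''")
	}

	// e.g. inside InitFaas:
	//   gitSql := fmt.Sprintf("insert into gits (...) values('%s', ...);",
	//       escapeSQLString(configs.Faas.Git.Token), ...)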
--kubeconfig %s", yamlFile, kubeconfig) + err = execBash(command) if err != nil { return err } return err } -func applyHarbor(username, password, server string) error { - command := "kubectl create secret docker-registry faas-harbor --docker-username=" + username + " --docker-password=" + password + " --docker-server=" + server + " -n builder" +func applyHarbor(username, password, server, kubeconfig string) error { + command := "kubectl create secret docker-registry faas-docker --docker-username=" + username + " --docker-password=" + password + " --docker-server=" + server + " -n builder" + " --kubeconfig " + kubeconfig err := execBash(command) if err != nil { return err } - command = "kubectl create secret docker-registry faas-harbor --docker-username=" + username + " --docker-password=" + password + " --docker-server=" + server + " -n serving" + command = "kubectl create secret docker-registry faas-docker --docker-username=" + username + " --docker-password=" + password + " --docker-server=" + server + " -n serving" + " --kubeconfig " + kubeconfig err = execBash(command) if err != nil { return err @@ -107,29 +119,17 @@ func applyHarbor(username, password, server string) error { return nil } -/* -func applyFaasSql(msserver,msport,username,password,token,git_host,knows_hosts,ssh,docker_host,docker_username,docker_pass string) error { - gits := Gits{} - gits.ID="J2P57lAS" - gits.Host = git_host - gits.Token = token - gits.KnownHosts = knows_hosts - gits.Name = "rsa" - gits.SSH = ssh - fqn := username+ ":" + password+"@tcp("+msserver+":"+msport+")/faas?charset=utf8&parseTime=True&loc=Local" - db, err := gorm.Open("mysql", fqn) - if err != nil { - return err +func DecodeBase64String(enc string) string { + reader := strings.NewReader(enc) + decoder := base64.NewDecoder(base64.RawStdEncoding, reader) + buf := make([]byte, 1024) + dst := "" + for { + n, err := decoder.Read(buf) + dst += string(buf[:n]) + if err != nil || n == 0 { + break + } } - defer db.Close() - db.Create(&gits) - docker := Dockers{} - docker.ID = "1" - docker.Host = docker_host - docker.Name = "faas-harbor" - docker.UserName = docker_username - docker.Secret = docker_pass - db.Create(&docker) - return nil + return dst } -*/ diff --git a/deployment/pkg/modifyValues.go b/deployment/pkg/modifyValues.go index 0a5c783..9efe974 100644 --- a/deployment/pkg/modifyValues.go +++ b/deployment/pkg/modifyValues.go @@ -220,16 +220,17 @@ func ModifyValuesFile(filepath, namespace string, configs *Configs, ngGateWay bo value.Config.Etcd.Username = configs.Config.Etcd.Username value.Config.Etcd.Password = configs.Config.Etcd.Password } - if strings.Contains(filepath, "portal") { - value.Ingress.Hosts[0].Host = "portal." + configs.Domain - value.Websocket_hostname = "ws." + configs.Domain - value.Home_hostname = "home." + configs.Domain - value.Portal_hostname = "portal." + configs.Domain - value.Vendor.Hostname = "vendors." + configs.Domain - value.Vendor.Port = 80 - value.Vendor.Protocol = "http" - } */ + if strings.Contains(filepath, "portal") { + value.Ingress.Hosts[0].Host = "portal." + configs.Domain + value.Websocket_hostname = "ws." + configs.Domain + value.Home_hostname = "home." + configs.Domain + value.Portal_hostname = "portal." + configs.Domain + value.Vendor.Hostname = "vendors." + configs.Domain + value.Vendor.Port = 80 + value.Vendor.Protocol = "http" + } + if strings.Contains(filepath, "home") { value.Ingress.Hosts[0].Host = "home." + configs.Domain value.Websocket_hostname = "ws." 
diff --git a/deployment/pkg/modifyValues.go b/deployment/pkg/modifyValues.go
index 0a5c783..9efe974 100644
--- a/deployment/pkg/modifyValues.go
+++ b/deployment/pkg/modifyValues.go
@@ -220,16 +220,17 @@ func ModifyValuesFile(filepath, namespace string, configs *Configs, ngGateWay bo
 		value.Config.Etcd.Username = configs.Config.Etcd.Username
 		value.Config.Etcd.Password = configs.Config.Etcd.Password
 	}
-	if strings.Contains(filepath, "portal") {
-		value.Ingress.Hosts[0].Host = "portal." + configs.Domain
-		value.Websocket_hostname = "ws." + configs.Domain
-		value.Home_hostname = "home." + configs.Domain
-		value.Portal_hostname = "portal." + configs.Domain
-		value.Vendor.Hostname = "vendors." + configs.Domain
-		value.Vendor.Port = 80
-		value.Vendor.Protocol = "http"
-	}
 	*/
+	if strings.Contains(filepath, "portal") {
+		value.Ingress.Hosts[0].Host = "portal." + configs.Domain
+		value.Websocket_hostname = "ws." + configs.Domain
+		value.Home_hostname = "home." + configs.Domain
+		value.Portal_hostname = "portal." + configs.Domain
+		value.Vendor.Hostname = "vendors." + configs.Domain
+		value.Vendor.Port = 80
+		value.Vendor.Protocol = "http"
+	}
+
 	if strings.Contains(filepath, "home") {
 		value.Ingress.Hosts[0].Host = "home." + configs.Domain
 		value.Websocket_hostname = "ws." + configs.Domain
diff --git a/deployment/pkg/startInstall.go b/deployment/pkg/startInstall.go
index 525a195..7057516 100644
--- a/deployment/pkg/startInstall.go
+++ b/deployment/pkg/startInstall.go
@@ -78,7 +78,8 @@ func Start(kubeconfig, namespace, configFile, depFile, registry, regisUser, regi
 	fmt.Println("-----------------------------------------> start initializing MySQL")
 	releases, _ := ioutil.ReadDir(depFile + "/schemas")
 	for _, release := range releases {
-		err := deployMysql(kubeconfig, namespace, release.Name(), depFile, configs)
+		sqlName := "./deployment/schemas/" + release.Name()
+		err := deployMysql(kubeconfig, namespace, sqlName, depFile, configs)
 		if err != nil {
 			fmt.Println("---------------------------------------> database initialization failed")
 			return
@@ -101,16 +102,24 @@ func Start(kubeconfig, namespace, configFile, depFile, registry, regisUser, regi
 			}
 		}
 		var command string
-		switch {
-		case strings.Contains(release.Name(), "builder"):
+		switch release.Name() {
+		case "builder":
 			command = fmt.Sprintf("helm install %s %s/quanxiang_charts/%s --kubeconfig %s -n builder --set namespace=%s --set lowcode=%s --timeout 1800s --create-namespace",
 				release.Name(), depFile, release.Name(), kubeconfig, "builder", namespace)
-		case strings.Contains(release.Name(), "serving"):
+		case "serving":
 			command = fmt.Sprintf("helm install %s %s/quanxiang_charts/%s --kubeconfig %s -n serving --set namespace=%s --timeout 1800s --create-namespace",
 				release.Name(), depFile, release.Name(), kubeconfig, "serving")
-		case strings.Contains(release.Name(), "fluent"):
+		case "fluent":
 			command = fmt.Sprintf("helm install %s %s/quanxiang_charts/%s --kubeconfig %s -n builder --set namespace=%s --timeout 1800s --create-namespace",
 				release.Name(), depFile, release.Name(), kubeconfig, "builder")
+		case "fluent-bit":
+			eshost, err := AddrParase(configs.Config.Elastic.Host[0], namespace)
+			if err != nil {
+				fmt.Println(err)
+			}
+			// eshost has the form http://<host>:<port>; esHosts[1][2:] strips
+			// the scheme's leading slashes from the host part
+			esHosts := strings.Split(eshost, ":")
+			command = fmt.Sprintf("helm install %s %s/quanxiang_charts/%s --kubeconfig %s -n builder --set namespace=%s --timeout 1800s --set backend.es.host=%s --set backend.es.port=%s --create-namespace",
+				release.Name(), depFile, release.Name(), kubeconfig, "builder", esHosts[1][2:], esHosts[2])
 		default:
 			command = fmt.Sprintf("helm install %s %s/quanxiang_charts/%s --kubeconfig %s -n %s --set namespace=%s --timeout 1800s --create-namespace",
 				release.Name(), depFile, release.Name(), kubeconfig, namespace, namespace)
@@ -126,19 +135,24 @@ func Start(kubeconfig, namespace, configFile, depFile, registry, regisUser, regi
 		fmt.Println(err)
 		return
 	}
-	err = applyGitSeret(configs.Faas.Git.GitSSh, configs.Faas.Git.KnownHosts, configs.Faas.Git.Privatekey, kubeconfig, namespace)
+	err = applyGitSecret(configs.Faas.Git.Host, configs.Faas.Git.KnownHostsScan, configs.Faas.Git.SSHPrivatekey, kubeconfig, "builder")
 	if err != nil {
 		fmt.Println(err)
-		return
 	}
-	err = applyHarbor(configs.Faas.Docker.Name, configs.Faas.Docker.Pass, configs.Faas.Docker.Server)
+	err = applyHarbor(configs.Faas.Docker.User, configs.Faas.Docker.Pass, configs.Faas.Docker.Host, kubeconfig)
 	if err != nil {
 		fmt.Println(err)
-		return
 	}
-	command := "helm install searchindex " + depFile + "/search_index" + " --kubeconfig " + kubeconfig + " -n " + namespace + " --timeout 1800s"
-	execBash(command)
-	command = "helm install auth " + depFile + "/portalauth" + " --kubeconfig " + kubeconfig + " -n " + namespace + " --timeout 1800s"
-	execBash(command)
+
+	err = InitFaas(kubeconfig, namespace, depFile, configs)
+	if err != nil {
+		fmt.Println(err)
+	}
+	/*
+		command := "helm install searchindex " + depFile + "/search_index" + " --kubeconfig " + kubeconfig + " -n " + namespace + " --timeout 1800s"
+		execBash(command)
+		command = "helm install auth " + depFile + "/portalauth" + " --kubeconfig " + kubeconfig + " -n " + namespace + " --timeout 1800s"
+		execBash(command)
+	*/
 	fmt.Println("-----------------------------------------> deployment finished")
 }
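For context on the fluent-bit case above: splitting an address such as http://elasticsearch.default.svc:9200 on ":" yields ["http", "//elasticsearch.default.svc", "9200"], which is why the host is esHosts[1][2:] and the port esHosts[2]. A sketch of an equivalent but sturdier parse with net/url, assuming the address always carries a scheme (the address here is a placeholder):

	u, err := url.Parse("http://elasticsearch.default.svc:9200")
	if err != nil {
		fmt.Println(err)
	}
	// Hostname() drops the port, Port() returns it as a string
	fmt.Println(u.Hostname(), u.Port()) // elasticsearch.default.svc 9200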
diff --git a/deployment/pkg/uninstall.go b/deployment/pkg/uninstall.go
index cecda3b..92478f3 100644
--- a/deployment/pkg/uninstall.go
+++ b/deployment/pkg/uninstall.go
@@ -95,6 +95,20 @@ func UninstallServece(namespace, depPath, kubeconfig string, uninstallMiddlerwar
 			}
 		}
 	}
-
+	// remove the faas secrets created during installation
+	command := "kubectl delete secret rsa -n builder --kubeconfig " + kubeconfig
+	err = execBash(command)
+	if err != nil {
+		fmt.Println(err)
+	}
+	command = "kubectl delete secret faas-docker -n serving --kubeconfig " + kubeconfig
+	err = execBash(command)
+	if err != nil {
+		fmt.Println(err)
+	}
+	command = "kubectl delete secret faas-docker -n builder --kubeconfig " + kubeconfig
+	err = execBash(command)
+	if err != nil {
+		fmt.Println(err)
+	}
 	return nil
 }
diff --git a/flow b/flow
index 761a8d7..ed14a92 160000
--- a/flow
+++ b/flow
@@ -1 +1 @@
-Subproject commit 761a8d7db45d690c6f940e83c3717e1af596fe89
+Subproject commit ed14a925194c60fe57a93cfca7ca63aeee98149c

From a20c5317950315c833ea2542b9fbad73cecfd0b4 Mon Sep 17 00:00:00 2001
From: Kevin Li
Date: Tue, 25 Oct 2022 15:32:59 +0800
Subject: [PATCH 2/2] feat: 2.0.0 release

---
 deployment/configs/configs.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deployment/configs/configs.yml b/deployment/configs/configs.yml
index 79713ec..555ef36 100644
--- a/deployment/configs/configs.yml
+++ b/deployment/configs/configs.yml
@@ -21,7 +21,7 @@ minio:
 #Service profile: configuration for the platform services
 image:
   repo: docker.io/quanxiang
-  tag: v1.1.2
+  tag: v2.0.0
 imagePullSecrets: ""
 domain: example.com # the domain used to access the platform
 persis: